2024-12-03 15:20:21,359 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-03 15:20:21,375 main DEBUG Took 0.013564 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-03 15:20:21,376 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-03 15:20:21,376 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-03 15:20:21,378 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-03 15:20:21,379 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,388 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-03 15:20:21,404 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,406 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,407 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,408 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,408 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,409 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,410 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,410 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,411 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,412 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,412 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,413 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,414 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-03 15:20:21,414 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,415 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,415 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,416 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,416 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,417 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,417 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,418 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,418 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,418 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-03 15:20:21,419 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,419 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-03 15:20:21,420 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-03 15:20:21,422 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-03 15:20:21,424 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-03 15:20:21,424 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-03 15:20:21,425 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-03 15:20:21,425 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-03 15:20:21,434 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-03 15:20:21,436 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-03 15:20:21,438 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-03 15:20:21,438 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-03 15:20:21,439 main DEBUG createAppenders(={Console}) 2024-12-03 15:20:21,439 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-03 15:20:21,440 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-03 15:20:21,440 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-03 15:20:21,440 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-03 15:20:21,441 main DEBUG OutputStream closed 2024-12-03 15:20:21,441 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-03 15:20:21,441 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-03 15:20:21,441 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-03 15:20:21,521 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-03 15:20:21,523 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-03 15:20:21,524 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-03 15:20:21,525 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-03 15:20:21,526 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-03 15:20:21,526 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-03 15:20:21,526 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-03 15:20:21,527 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-03 15:20:21,527 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-03 15:20:21,527 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-03 15:20:21,527 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-03 15:20:21,528 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-03 15:20:21,528 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-03 15:20:21,528 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-03 15:20:21,529 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-03 15:20:21,529 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-03 15:20:21,529 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-03 15:20:21,530 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-03 15:20:21,532 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-03 15:20:21,532 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-03 15:20:21,533 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-03 15:20:21,533 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-03T15:20:21,775 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5 2024-12-03 15:20:21,778 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-03 15:20:21,779 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
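The preamble above is Log4j2 2.17.2 loading the test configuration from hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties: per-package levels (org.apache.hadoop.hbase at DEBUG, org.apache.hadoop at WARN, org.apache.zookeeper at ERROR, and so on), a root logger at INFO bound to a Console appender, and the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. Below is a minimal sketch of an equivalent log4j2.properties; the real file uses the custom org.apache.hadoop.hbase.logging.HBaseTestAppender (target SYSTEM_ERR, maxSize 1G), which this sketch replaces with the stock Console appender, and the property prefixes are illustrative rather than copied from that file.

```properties
# Sketch of a Log4j2 properties configuration matching the levels recorded above.
# Assumption: the custom HBaseTestAppender is substituted with the stock Console appender.
appender.console.type = Console
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR

rootLogger.level = INFO
rootLogger.appenderRef.console.ref = Console
```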
2024-12-03T15:20:21,787 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-03T15:20:21,806 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-03T15:20:21,809 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5, deleteOnExit=true 2024-12-03T15:20:21,809 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-03T15:20:21,810 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/test.cache.data in system properties and HBase conf 2024-12-03T15:20:21,811 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.tmp.dir in system properties and HBase conf 2024-12-03T15:20:21,811 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.log.dir in system properties and HBase conf 2024-12-03T15:20:21,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-03T15:20:21,812 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-03T15:20:21,813 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-03T15:20:21,931 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-03T15:20:22,038 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-03T15:20:22,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-03T15:20:22,042 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-03T15:20:22,043 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-03T15:20:22,043 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T15:20:22,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-03T15:20:22,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-03T15:20:22,044 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-03T15:20:22,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T15:20:22,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-03T15:20:22,045 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/nfs.dump.dir in system properties and HBase conf 2024-12-03T15:20:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/java.io.tmpdir in system properties and HBase conf 2024-12-03T15:20:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-03T15:20:22,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-03T15:20:22,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-03T15:20:22,937 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-03T15:20:23,025 INFO [Time-limited test {}] log.Log(170): Logging initialized @2419ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-03T15:20:23,106 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T15:20:23,182 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T15:20:23,207 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T15:20:23,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T15:20:23,210 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T15:20:23,223 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T15:20:23,225 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.log.dir/,AVAILABLE} 2024-12-03T15:20:23,227 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T15:20:23,436 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/java.io.tmpdir/jetty-localhost-34393-hadoop-hdfs-3_4_1-tests_jar-_-any-16856893185247937007/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T15:20:23,446 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:34393} 2024-12-03T15:20:23,446 INFO [Time-limited test {}] server.Server(415): Started @2842ms 2024-12-03T15:20:23,845 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-03T15:20:23,852 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-03T15:20:23,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-03T15:20:23,853 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-03T15:20:23,853 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-03T15:20:23,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.log.dir/,AVAILABLE} 2024-12-03T15:20:23,855 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-03T15:20:23,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/java.io.tmpdir/jetty-localhost-37601-hadoop-hdfs-3_4_1-tests_jar-_-any-13692764260418582747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T15:20:23,978 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:37601} 2024-12-03T15:20:23,978 INFO [Time-limited test {}] server.Server(415): Started @3373ms 2024-12-03T15:20:24,033 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-03T15:20:24,519 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data1/current/BP-1589599033-172.17.0.2-1733239222697/current, will proceed with Du for space computation calculation, 2024-12-03T15:20:24,519 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data2/current/BP-1589599033-172.17.0.2-1733239222697/current, will proceed with Du for space computation calculation, 2024-12-03T15:20:24,588 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-03T15:20:24,643 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8af31bb08ef4b628 with lease ID 0xc50eeac5cf4f3ada: Processing first storage report for DS-534bb076-267d-42fa-86f3-e79df68c5ea3 from datanode DatanodeRegistration(127.0.0.1:32963, datanodeUuid=a3ae884a-3479-4e2a-ad3f-1684a6ad8aaa, infoPort=42003, infoSecurePort=0, ipcPort=39809, storageInfo=lv=-57;cid=testClusterID;nsid=897138404;c=1733239222697) 2024-12-03T15:20:24,645 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8af31bb08ef4b628 with lease ID 0xc50eeac5cf4f3ada: from storage DS-534bb076-267d-42fa-86f3-e79df68c5ea3 node DatanodeRegistration(127.0.0.1:32963, datanodeUuid=a3ae884a-3479-4e2a-ad3f-1684a6ad8aaa, infoPort=42003, infoSecurePort=0, ipcPort=39809, storageInfo=lv=-57;cid=testClusterID;nsid=897138404;c=1733239222697), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-03T15:20:24,645 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8af31bb08ef4b628 with lease ID 0xc50eeac5cf4f3ada: Processing first storage report for DS-dcbec322-f358-4914-b603-4bc8e83f69b3 from datanode DatanodeRegistration(127.0.0.1:32963, datanodeUuid=a3ae884a-3479-4e2a-ad3f-1684a6ad8aaa, infoPort=42003, infoSecurePort=0, ipcPort=39809, storageInfo=lv=-57;cid=testClusterID;nsid=897138404;c=1733239222697) 2024-12-03T15:20:24,646 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8af31bb08ef4b628 with lease ID 0xc50eeac5cf4f3ada: from storage DS-dcbec322-f358-4914-b603-4bc8e83f69b3 node DatanodeRegistration(127.0.0.1:32963, datanodeUuid=a3ae884a-3479-4e2a-ad3f-1684a6ad8aaa, infoPort=42003, infoSecurePort=0, ipcPort=39809, storageInfo=lv=-57;cid=testClusterID;nsid=897138404;c=1733239222697), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-03T15:20:24,687 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5 
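Up to this point the log shows HBaseTestingUtility standing up a single-node mini cluster (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1}), redirecting hadoop.tmp.dir, hadoop.log.dir, the YARN/DFS directories and hbase.rootdir into the per-test data directory, and starting one DataNode whose block reports are processed above. A minimal, hypothetical test skeleton that drives the same startup path is sketched below; the class name is a placeholder and this is not the body of TestAcidGuaranteesWithAdaptivePolicy.

```java
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class MiniClusterSketchTest { // hypothetical class, for illustration only

  // HBaseClassTestRule enforces the per-class timeout reported above ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterSketchTest.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption logged above: 1 master, 1 region server,
    // 1 data node and 1 ZooKeeper server, all rooted under the test data directory.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}
```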
2024-12-03T15:20:24,771 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/zookeeper_0, clientPort=60989, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-03T15:20:24,781 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60989 2024-12-03T15:20:24,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:24,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:25,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741825_1001 (size=7) 2024-12-03T15:20:25,449 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 with version=8 2024-12-03T15:20:25,449 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/hbase-staging 2024-12-03T15:20:25,584 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-03T15:20:25,841 INFO [Time-limited test {}] client.ConnectionUtils(129): master/2b5ef621a0dd:0 server-side Connection retries=45 2024-12-03T15:20:25,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:25,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:25,860 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T15:20:25,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:25,860 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T15:20:25,995 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T15:20:26,057 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-03T15:20:26,066 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-03T15:20:26,070 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T15:20:26,095 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6141 (auto-detected) 2024-12-03T15:20:26,096 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-03T15:20:26,115 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36539 2024-12-03T15:20:26,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:26,126 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:26,139 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36539 connecting to ZooKeeper ensemble=127.0.0.1:60989 2024-12-03T15:20:26,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:365390x0, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T15:20:26,177 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36539-0x1009f6fd8b40000 connected 2024-12-03T15:20:26,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T15:20:26,213 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T15:20:26,216 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T15:20:26,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36539 2024-12-03T15:20:26,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36539 2024-12-03T15:20:26,221 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36539 2024-12-03T15:20:26,222 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36539 2024-12-03T15:20:26,223 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36539 
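The entries above show the MiniZooKeeperCluster answering on client port 60989, the FSUtils version file written under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411, and the master's NettyRpcServer binding to 172.17.0.2:36539 before registering watchers on the /hbase/master, /hbase/running and /hbase/acl znodes. As a hedged illustration of what those ZooKeeper coordinates mean to a client, the sketch below points a plain client configuration at the same ensemble; inside the test the configuration comes from HBaseTestingUtility.getConfiguration() rather than being set by hand, and the quorum/port literals are specific to this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ZkConnectionSketch {
  public static void main(String[] args) throws Exception {
    // Point a client configuration at the mini ZooKeeper ensemble logged above
    // (ensemble 127.0.0.1:60989). The explicit keys are shown only for clarity.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 60989);

    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}
```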
2024-12-03T15:20:26,230 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411, hbase.cluster.distributed=false 2024-12-03T15:20:26,293 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/2b5ef621a0dd:0 server-side Connection retries=45 2024-12-03T15:20:26,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:26,293 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:26,294 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-03T15:20:26,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-03T15:20:26,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-03T15:20:26,296 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-03T15:20:26,298 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-03T15:20:26,299 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46815 2024-12-03T15:20:26,301 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-03T15:20:26,307 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-03T15:20:26,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:26,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:26,314 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46815 connecting to ZooKeeper ensemble=127.0.0.1:60989 2024-12-03T15:20:26,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468150x0, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-03T15:20:26,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46815-0x1009f6fd8b40001 connected 2024-12-03T15:20:26,319 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468150x0, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T15:20:26,321 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T15:20:26,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-03T15:20:26,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T15:20:26,325 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46815 2024-12-03T15:20:26,327 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46815 2024-12-03T15:20:26,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T15:20:26,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46815 2024-12-03T15:20:26,333 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:26,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T15:20:26,340 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T15:20:26,342 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:26,351 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;2b5ef621a0dd:36539 2024-12-03T15:20:26,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T15:20:26,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:26,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T15:20:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-03T15:20:26,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:26,379 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-03T15:20:26,381 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/2b5ef621a0dd,36539,1733239225577 from backup master directory 2024-12-03T15:20:26,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:26,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T15:20:26,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-03T15:20:26,390 WARN [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T15:20:26,390 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:26,393 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-03T15:20:26,398 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-03T15:20:26,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741826_1002 (size=42) 2024-12-03T15:20:26,887 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/hbase.id with ID: c913f2dd-53aa-433f-9779-4459e642f263 2024-12-03T15:20:26,928 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-03T15:20:26,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:26,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:26,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741827_1003 (size=196) 2024-12-03T15:20:26,986 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:20:26,988 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-03T15:20:27,010 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:27,016 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T15:20:27,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741828_1004 (size=1189) 2024-12-03T15:20:27,472 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store 2024-12-03T15:20:27,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741829_1005 (size=34) 2024-12-03T15:20:27,502 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
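The descriptor dump above for the master local region 'master:store' lists four column families: 'info' (VERSIONS=3, ROW_INDEX_V1 data block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks) and 'proc', 'rs', 'state' (VERSIONS=1, no encoding, ROW bloom filter, 64 KB blocks). The sketch below only shows how those printed attributes map onto the public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder API; the region itself is created internally by MasterRegionFactory, not through the Admin client, so this block is purely illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // The 'info' family as printed above: VERSIONS=3, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, IN_MEMORY=true, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc', 'rs' and 'state' use the defaults shown in the log:
    // VERSIONS=1, no encoding, ROW bloom filter, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");
    ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.of("rs");
    ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.of("state");

    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .setColumnFamily(rs)
        .setColumnFamily(state)
        .build();
  }
}
```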
2024-12-03T15:20:27,503 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:27,504 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T15:20:27,504 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:20:27,505 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:20:27,505 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T15:20:27,505 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:20:27,505 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:20:27,505 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T15:20:27,509 WARN [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/.initializing 2024-12-03T15:20:27,509 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/WALs/2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:27,519 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T15:20:27,535 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b5ef621a0dd%2C36539%2C1733239225577, suffix=, logDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/WALs/2b5ef621a0dd,36539,1733239225577, archiveDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/oldWALs, maxLogs=10 2024-12-03T15:20:27,569 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/WALs/2b5ef621a0dd,36539,1733239225577/2b5ef621a0dd%2C36539%2C1733239225577.1733239227542, exclude list is [], retry=0 2024-12-03T15:20:27,594 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32963,DS-534bb076-267d-42fa-86f3-e79df68c5ea3,DISK] 2024-12-03T15:20:27,597 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
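The WAL lines above report the AsyncFSWALProvider being instantiated and the master-region WAL configured with blocksize=256 MB, rollsize=128 MB and maxLogs=10, with the first writer pipeline built over the single DataNode. The sketch below lists the standard configuration keys that, under the usual defaults, correspond to those numbers (roll size being blocksize times the roll multiplier); whether this run sets them explicitly or simply inherits defaults is not visible in the log, so treat the keys and values as a hedged reading rather than a tuning recommendation.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walTuning() {
    // Hedged mapping from the values printed above to standard HBase WAL settings.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                              // AsyncFSWALProvider, as instantiated above
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);  // WAL block size (256 MB)
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);           // roll at blocksize * 0.5 = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 10);                          // maxLogs=10
    return conf;
  }
}
```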
2024-12-03T15:20:27,648 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/WALs/2b5ef621a0dd,36539,1733239225577/2b5ef621a0dd%2C36539%2C1733239225577.1733239227542 2024-12-03T15:20:27,650 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003)] 2024-12-03T15:20:27,650 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:27,651 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:27,656 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,657 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,704 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-03T15:20:27,734 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:27,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:27,738 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-03T15:20:27,743 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:27,744 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:27,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,748 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-03T15:20:27,748 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:27,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:27,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-03T15:20:27,756 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:27,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:27,768 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,770 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,782 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-03T15:20:27,788 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-03T15:20:27,794 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:20:27,795 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59740990, jitterRate=-0.1097898781299591}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-03T15:20:27,801 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T15:20:27,803 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-03T15:20:27,845 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@adad2e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:27,893 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
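The FlushLargeStoresPolicy(65) entry above falls back to region.getMemStoreFlushHeapSize divided by the number of column families because hbase.hregion.percolumnfamilyflush.size.lower.bound is unset. A worked check of that arithmetic for the master's local store, using only numbers reported in this log (flushSize=134217728 and four families: info, proc, rs, state):

    // Fallback lower bound for per-column-family flushes, as reported above:
    // memstore flush size / number of families = 134217728 / 4 = 33554432 bytes (32.0 M),
    // which matches FlushLargeStoresPolicy{flushSizeLowerBound=33554432}.
    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memstoreFlushSize = 134_217_728L; // flushSize logged by MasterRegionFlusherAndCompactor
            int columnFamilies = 4;                // info, proc, rs, state
            long lowerBound = memstoreFlushSize / columnFamilies;
            System.out.println(lowerBound + " bytes = " + (lowerBound / (1024 * 1024)) + " MB");
        }
    }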
2024-12-03T15:20:27,909 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-03T15:20:27,910 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-03T15:20:27,913 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-03T15:20:27,914 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-03T15:20:27,920 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-03T15:20:27,920 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-03T15:20:27,951 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-03T15:20:27,967 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-03T15:20:27,970 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-03T15:20:27,973 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-03T15:20:27,975 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-03T15:20:27,978 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-03T15:20:27,980 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-03T15:20:27,985 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-03T15:20:27,989 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-03T15:20:27,991 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-03T15:20:27,994 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-03T15:20:28,008 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-03T15:20:28,010 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-03T15:20:28,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T15:20:28,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-03T15:20:28,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,017 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=2b5ef621a0dd,36539,1733239225577, sessionid=0x1009f6fd8b40000, setting cluster-up flag (Was=false) 2024-12-03T15:20:28,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,030 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,036 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-03T15:20:28,038 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:28,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,054 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-03T15:20:28,055 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:28,154 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;2b5ef621a0dd:46815 2024-12-03T15:20:28,159 INFO 
[RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1008): ClusterId : c913f2dd-53aa-433f-9779-4459e642f263 2024-12-03T15:20:28,162 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-03T15:20:28,167 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-03T15:20:28,173 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-03T15:20:28,173 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-03T15:20:28,173 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-03T15:20:28,176 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-03T15:20:28,176 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-03T15:20:28,177 DEBUG [RS:0;2b5ef621a0dd:46815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@297e34e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:28,179 DEBUG [RS:0;2b5ef621a0dd:46815 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42ddac64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b5ef621a0dd/172.17.0.2:0 2024-12-03T15:20:28,181 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-03T15:20:28,182 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-03T15:20:28,182 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1090): About to register with Master. 
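The StochasticLoadBalancer(294) entry above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000. A minimal sketch, assuming the usual hbase.master.balancer.stochastic.* property names (which do not appear verbatim in this log), of how those values are typically supplied:

    // Hedged sketch of StochasticLoadBalancer tuning; property names are assumptions,
    // chosen to mirror the values the balancer logged when it loaded its config.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
        public static Configuration balancerConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L); // milliseconds
            return conf;
        }
    }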
2024-12-03T15:20:28,181 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 2b5ef621a0dd,36539,1733239225577 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-03T15:20:28,184 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(3073): reportForDuty to master=2b5ef621a0dd,36539,1733239225577 with isa=2b5ef621a0dd/172.17.0.2:46815, startcode=1733239226292 2024-12-03T15:20:28,185 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/2b5ef621a0dd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T15:20:28,185 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/2b5ef621a0dd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/2b5ef621a0dd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/2b5ef621a0dd:0, corePoolSize=5, maxPoolSize=5 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/2b5ef621a0dd:0, corePoolSize=10, maxPoolSize=10 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/2b5ef621a0dd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T15:20:28,186 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,191 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-03T15:20:28,192 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733239258192 2024-12-03T15:20:28,192 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-03T15:20:28,193 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-03T15:20:28,195 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-03T15:20:28,197 DEBUG [RS:0;2b5ef621a0dd:46815 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T15:20:28,197 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:28,198 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T15:20:28,199 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-03T15:20:28,199 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-03T15:20:28,200 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-03T15:20:28,200 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-03T15:20:28,202 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
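FSTableDescriptors(133) above writes the hbase:meta descriptor whose 'info' family carries VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and BLOCKSIZE => 8192. hbase:meta itself is created internally, but the same attributes are expressed through the public client API; a minimal sketch for a hypothetical user table (the table name "example" is illustrative only):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        // Builds a descriptor whose 'info' family mirrors the attributes logged for hbase:meta.
        public static TableDescriptor build() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // hypothetical table
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }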
2024-12-03T15:20:28,203 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-03T15:20:28,204 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-03T15:20:28,205 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-03T15:20:28,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741831_1007 (size=1039) 2024-12-03T15:20:28,212 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-03T15:20:28,212 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:20:28,214 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-03T15:20:28,214 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-03T15:20:28,217 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.large.0-1733239228216,5,FailOnTimeoutGroup] 2024-12-03T15:20:28,223 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.small.0-1733239228218,5,FailOnTimeoutGroup] 2024-12-03T15:20:28,223 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
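The CleanerChore(192) entries above list the log-cleaner and HFile-cleaner delegates the master initializes. A minimal sketch, assuming the standard hbase.master.logcleaner.plugins / hbase.master.hfilecleaner.plugins properties (not shown in this log), of wiring those same delegate classes explicitly; the class names are copied from the log, and the default plugin lists vary by HBase release:

    // Hedged sketch: cleaner delegate chains matching the classes reported by CleanerChore(192).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerPluginsSketch {
        public static Configuration cleanerConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.master.logcleaner.plugins", String.join(",",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner",
                "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner"));
            conf.set("hbase.master.hfilecleaner.plugins", String.join(",",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner",
                "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner",
                "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner",
                "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner"));
            return conf;
        }
    }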
2024-12-03T15:20:28,223 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-03T15:20:28,225 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,225 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741832_1008 (size=32) 2024-12-03T15:20:28,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:28,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T15:20:28,252 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50281, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T15:20:28,256 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T15:20:28,256 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:28,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:28,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T15:20:28,262 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36539 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,262 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T15:20:28,262 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:28,265 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36539 {}] master.ServerManager(486): Registering regionserver=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:28,265 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T15:20:28,271 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T15:20:28,271 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:28,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:28,275 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740 2024-12-03T15:20:28,276 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740 2024-12-03T15:20:28,280 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
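Every store opened above logs StoreFileTrackerFactory(122) instantiating DefaultStoreFileTracker, and the hbase:meta descriptor earlier in this log carries METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}. A minimal sketch of selecting that tracker via configuration; the property name is taken from the descriptor metadata above, while the set of alternative tracker values is version-dependent:

    // Sketch: store file tracker selection corresponding to DefaultStoreFileTracker seen above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerSketch {
        public static Configuration trackerConf() {
            Configuration conf = HBaseConfiguration.create();
            // 'DEFAULT' maps to DefaultStoreFileTracker; other values exist in newer releases.
            conf.set("hbase.store.file-tracker.impl", "DEFAULT");
            return conf;
        }
    }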
2024-12-03T15:20:28,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-03T15:20:28,290 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:20:28,290 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:44673 2024-12-03T15:20:28,290 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-03T15:20:28,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T15:20:28,297 DEBUG [RS:0;2b5ef621a0dd:46815 {}] zookeeper.ZKUtil(111): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,297 WARN [RS:0;2b5ef621a0dd:46815 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-03T15:20:28,297 INFO [RS:0;2b5ef621a0dd:46815 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T15:20:28,297 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,298 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:20:28,300 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69652111, jitterRate=0.03789733350276947}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:20:28,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-03T15:20:28,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-03T15:20:28,304 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-03T15:20:28,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-03T15:20:28,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T15:20:28,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T15:20:28,309 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-03T15:20:28,309 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-03T15:20:28,310 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [2b5ef621a0dd,46815,1733239226292] 2024-12-03T15:20:28,313 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-03T15:20:28,314 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(107): Going to assign meta 2024-12-03T15:20:28,322 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-03T15:20:28,328 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-03T15:20:28,337 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-03T15:20:28,341 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-03T15:20:28,350 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-03T15:20:28,364 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-03T15:20:28,367 INFO [RS:0;2b5ef621a0dd:46815 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-03T15:20:28,368 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,369 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-03T15:20:28,378 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
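The PressureAwareCompactionThroughputController(131) entry above reports a 100 MB/s upper and 50 MB/s lower compaction throughput bound with a 60000 ms tuning period. A minimal sketch, assuming the hbase.hstore.compaction.throughput.* property names (they do not appear in this log and should be checked against your HBase release), of supplying those bounds:

    // Hedged sketch: compaction throughput limits matching the controller's logged configuration.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static Configuration throughputConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
            return conf;
        }
    }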
2024-12-03T15:20:28,378 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,378 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,378 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,378 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,378 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,382 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/2b5ef621a0dd:0, corePoolSize=2, maxPoolSize=2 2024-12-03T15:20:28,382 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,382 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,383 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,383 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,383 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/2b5ef621a0dd:0, corePoolSize=1, maxPoolSize=1 2024-12-03T15:20:28,383 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/2b5ef621a0dd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T15:20:28,383 DEBUG [RS:0;2b5ef621a0dd:46815 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0, corePoolSize=3, maxPoolSize=3 2024-12-03T15:20:28,385 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,386 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,386 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,386 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,387 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,46815,1733239226292-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-03T15:20:28,418 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-03T15:20:28,421 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,46815,1733239226292-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:28,455 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.Replication(204): 2b5ef621a0dd,46815,1733239226292 started 2024-12-03T15:20:28,455 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1767): Serving as 2b5ef621a0dd,46815,1733239226292, RpcServer on 2b5ef621a0dd/172.17.0.2:46815, sessionid=0x1009f6fd8b40001 2024-12-03T15:20:28,456 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-03T15:20:28,456 DEBUG [RS:0;2b5ef621a0dd:46815 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,457 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b5ef621a0dd,46815,1733239226292' 2024-12-03T15:20:28,457 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-03T15:20:28,458 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-03T15:20:28,459 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-03T15:20:28,459 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-03T15:20:28,460 DEBUG [RS:0;2b5ef621a0dd:46815 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,460 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '2b5ef621a0dd,46815,1733239226292' 2024-12-03T15:20:28,460 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-03T15:20:28,461 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-03T15:20:28,463 DEBUG [RS:0;2b5ef621a0dd:46815 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-03T15:20:28,463 INFO [RS:0;2b5ef621a0dd:46815 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-03T15:20:28,463 INFO [RS:0;2b5ef621a0dd:46815 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-03T15:20:28,492 WARN [2b5ef621a0dd:36539 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
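RegionServerRpcQuotaManager(64) and RegionServerSpaceQuotaManager(80) above both report quota support disabled, which is the out-of-the-box behaviour. A minimal sketch, assuming the standard hbase.quota.enabled switch (not shown in this log), of turning quota support on:

    // Hedged sketch: enabling RPC/space quota support, shown as disabled in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaToggleSketch {
        public static Configuration quotaConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setBoolean("hbase.quota.enabled", true); // assumed property name
            return conf;
        }
    }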
2024-12-03T15:20:28,571 INFO [RS:0;2b5ef621a0dd:46815 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-03T15:20:28,576 INFO [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b5ef621a0dd%2C46815%2C1733239226292, suffix=, logDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292, archiveDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/oldWALs, maxLogs=32 2024-12-03T15:20:28,602 DEBUG [RS:0;2b5ef621a0dd:46815 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292/2b5ef621a0dd%2C46815%2C1733239226292.1733239228579, exclude list is [], retry=0 2024-12-03T15:20:28,614 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32963,DS-534bb076-267d-42fa-86f3-e79df68c5ea3,DISK] 2024-12-03T15:20:28,622 INFO [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292/2b5ef621a0dd%2C46815%2C1733239226292.1733239228579 2024-12-03T15:20:28,626 DEBUG [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003)] 2024-12-03T15:20:28,744 DEBUG [2b5ef621a0dd:36539 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-03T15:20:28,750 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,758 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b5ef621a0dd,46815,1733239226292, state=OPENING 2024-12-03T15:20:28,767 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-03T15:20:28,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:28,771 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T15:20:28,771 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T15:20:28,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:28,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:28,961 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-12-03T15:20:28,967 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-03T15:20:28,979 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-03T15:20:28,979 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-03T15:20:28,980 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-03T15:20:28,983 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=2b5ef621a0dd%2C46815%2C1733239226292.meta, suffix=.meta, logDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292, archiveDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/oldWALs, maxLogs=32 2024-12-03T15:20:29,002 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292/2b5ef621a0dd%2C46815%2C1733239226292.meta.1733239228985.meta, exclude list is [], retry=0 2024-12-03T15:20:29,007 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32963,DS-534bb076-267d-42fa-86f3-e79df68c5ea3,DISK] 2024-12-03T15:20:29,011 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/WALs/2b5ef621a0dd,46815,1733239226292/2b5ef621a0dd%2C46815%2C1733239226292.meta.1733239228985.meta 2024-12-03T15:20:29,012 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42003:42003)] 2024-12-03T15:20:29,012 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:29,014 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-03T15:20:29,096 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-03T15:20:29,103 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
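CoprocessorHost(215) above loads org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint for hbase:meta from its table descriptor (priority 536870911). For a user table, the same attachment is normally expressed through the descriptor builder; a minimal sketch (the table and family names are illustrative only):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import java.io.IOException;

    public class CoprocessorAttachSketch {
        // Attaches the MultiRowMutationEndpoint class seen in the log to a user table descriptor.
        public static TableDescriptor build() throws IOException {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // hypothetical table
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }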
2024-12-03T15:20:29,109 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-03T15:20:29,109 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:29,110 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-03T15:20:29,110 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-03T15:20:29,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-03T15:20:29,119 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-03T15:20:29,119 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:29,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:29,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-03T15:20:29,125 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-03T15:20:29,125 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:29,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:29,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-03T15:20:29,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-03T15:20:29,133 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:29,135 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-03T15:20:29,138 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740 2024-12-03T15:20:29,147 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740 2024-12-03T15:20:29,151 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-03T15:20:29,160 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-03T15:20:29,164 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65781971, jitterRate=-0.019772246479988098}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:20:29,167 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-03T15:20:29,180 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733239228952 2024-12-03T15:20:29,206 DEBUG [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-03T15:20:29,206 INFO [RS_OPEN_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-03T15:20:29,210 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:29,217 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 2b5ef621a0dd,46815,1733239226292, state=OPEN 2024-12-03T15:20:29,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T15:20:29,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-03T15:20:29,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T15:20:29,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-03T15:20:29,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-03T15:20:29,239 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=2b5ef621a0dd,46815,1733239226292 in 451 msec 2024-12-03T15:20:29,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-03T15:20:29,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 919 msec 2024-12-03T15:20:29,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1550 sec 2024-12-03T15:20:29,282 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733239229282, 
completionTime=-1 2024-12-03T15:20:29,283 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-03T15:20:29,283 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-03T15:20:29,341 DEBUG [hconnection-0x240475eb-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:29,345 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41100, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:29,365 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-03T15:20:29,365 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733239289365 2024-12-03T15:20:29,365 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733239349365 2024-12-03T15:20:29,365 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 82 msec 2024-12-03T15:20:29,434 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:29,435 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:29,435 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:29,437 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-2b5ef621a0dd:36539, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:29,440 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:29,448 DEBUG [master/2b5ef621a0dd:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-03T15:20:29,452 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-03T15:20:29,454 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-03T15:20:29,471 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-03T15:20:29,479 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:20:29,481 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:29,486 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:20:29,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741835_1011 (size=358) 2024-12-03T15:20:29,519 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 31c39c5a8622ff80b89b6cf13dfade9c, NAME => 'hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:20:29,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741836_1012 (size=42) 2024-12-03T15:20:29,539 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:29,540 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 31c39c5a8622ff80b89b6cf13dfade9c, disabling compactions & flushes 2024-12-03T15:20:29,540 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:29,540 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:29,540 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 
after waiting 0 ms 2024-12-03T15:20:29,540 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:29,540 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:29,540 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 31c39c5a8622ff80b89b6cf13dfade9c: 2024-12-03T15:20:29,543 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:20:29,549 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733239229544"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239229544"}]},"ts":"1733239229544"} 2024-12-03T15:20:29,585 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-03T15:20:29,589 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:20:29,593 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239229589"}]},"ts":"1733239229589"} 2024-12-03T15:20:29,606 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-03T15:20:29,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=31c39c5a8622ff80b89b6cf13dfade9c, ASSIGN}] 2024-12-03T15:20:29,616 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=31c39c5a8622ff80b89b6cf13dfade9c, ASSIGN 2024-12-03T15:20:29,618 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=31c39c5a8622ff80b89b6cf13dfade9c, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:20:29,769 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=31c39c5a8622ff80b89b6cf13dfade9c, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:29,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 31c39c5a8622ff80b89b6cf13dfade9c, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:29,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:29,941 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:29,941 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 31c39c5a8622ff80b89b6cf13dfade9c, NAME => 'hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:29,942 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,942 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:29,943 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,943 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,951 INFO [StoreOpener-31c39c5a8622ff80b89b6cf13dfade9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,956 INFO [StoreOpener-31c39c5a8622ff80b89b6cf13dfade9c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31c39c5a8622ff80b89b6cf13dfade9c columnFamilyName info 2024-12-03T15:20:29,956 DEBUG [StoreOpener-31c39c5a8622ff80b89b6cf13dfade9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:29,958 INFO [StoreOpener-31c39c5a8622ff80b89b6cf13dfade9c-1 {}] regionserver.HStore(327): Store=31c39c5a8622ff80b89b6cf13dfade9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:29,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,962 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,969 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:20:29,983 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:20:29,986 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 31c39c5a8622ff80b89b6cf13dfade9c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68206539, jitterRate=0.016356632113456726}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-03T15:20:29,988 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 31c39c5a8622ff80b89b6cf13dfade9c: 2024-12-03T15:20:29,991 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c., pid=6, masterSystemTime=1733239229933 2024-12-03T15:20:30,005 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=31c39c5a8622ff80b89b6cf13dfade9c, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:30,007 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:20:30,007 INFO [RS_OPEN_PRIORITY_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 
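Note: each region open in this section logs a ConstantSizeRegionSplitPolicy with a slightly different desiredMaxFileSize (65781971 for hbase:meta above, 68206539 here, and 71982055 for TestAcidGuarantees further down). Those numbers are consistent with a 64 MB (67108864-byte) base file size, presumably this mini cluster's configured hbase.hregion.max.filesize, adjusted by the logged jitterRate. A quick arithmetic check under that assumption:

    public class SplitSizeJitterCheck {
        public static void main(String[] args) {
            long base = 64L * 1024 * 1024; // 67108864; assumed configured max file size
            double[] jitterRates = {
                -0.019772246479988098,  // hbase:meta (1588230740)
                0.016356632113456726,   // hbase:namespace (31c39c5a...)
                0.07261620461940765     // TestAcidGuarantees (267a7e74...)
            };
            for (double rate : jitterRates) {
                // base * (1 + jitterRate) lands on the logged desiredMaxFileSize,
                // give or take a byte of rounding in the policy's final long truncation.
                System.out.printf("%.0f%n", base * (1 + rate));
            }
        }
    }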
2024-12-03T15:20:30,023 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-03T15:20:30,024 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 31c39c5a8622ff80b89b6cf13dfade9c, server=2b5ef621a0dd,46815,1733239226292 in 235 msec 2024-12-03T15:20:30,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-03T15:20:30,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=31c39c5a8622ff80b89b6cf13dfade9c, ASSIGN in 411 msec 2024-12-03T15:20:30,033 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:20:30,034 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239230033"}]},"ts":"1733239230033"} 2024-12-03T15:20:30,044 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-03T15:20:30,049 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:20:30,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 594 msec 2024-12-03T15:20:30,078 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-03T15:20:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-03T15:20:30,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:30,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:20:30,113 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-03T15:20:30,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T15:20:30,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 27 msec 2024-12-03T15:20:30,148 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, 
namespace=hbase 2024-12-03T15:20:30,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-03T15:20:30,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 20 msec 2024-12-03T15:20:30,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-03T15:20:30,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-03T15:20:30,178 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.788sec 2024-12-03T15:20:30,180 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-03T15:20:30,181 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-03T15:20:30,182 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-03T15:20:30,183 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-03T15:20:30,183 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-03T15:20:30,184 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-03T15:20:30,185 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-03T15:20:30,193 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-03T15:20:30,194 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-03T15:20:30,194 INFO [master/2b5ef621a0dd:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=2b5ef621a0dd,36539,1733239225577-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-03T15:20:30,259 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-12-03T15:20:30,260 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. 
See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-03T15:20:30,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:30,275 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-03T15:20:30,275 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-03T15:20:30,289 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:30,306 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:30,317 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=2b5ef621a0dd,36539,1733239225577 2024-12-03T15:20:30,336 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=474, ProcessCount=11, AvailableMemoryMB=3002 2024-12-03T15:20:30,351 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:20:30,359 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55134, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:20:30,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
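Note: the WARN above flags ZKConnectionRegistry as deprecated and points at the client.rpcconnectionregistry section of the reference guide. A minimal client-side sketch of switching to the RPC-based registry, assuming the hbase.client.registry.impl and hbase.client.bootstrap.servers keys described there (neither key nor the endpoint below appears in this log, so verify against the linked section):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistryClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed keys per the referenced book section: pick the RPC connection registry
            // and bootstrap from cluster endpoints rather than the ZooKeeper quorum.
            conf.set("hbase.client.registry.impl",
                "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
            conf.set("hbase.client.bootstrap.servers", "2b5ef621a0dd:16000"); // hypothetical endpoint
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                System.out.println(admin.listTableNames().length);
            }
        }
    }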
2024-12-03T15:20:30,375 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:20:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:30,408 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:20:30,409 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:30,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-03T15:20:30,416 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:20:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T15:20:30,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741837_1013 (size=963) 2024-12-03T15:20:30,445 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:20:30,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741838_1014 (size=53) 2024-12-03T15:20:30,478 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:30,479 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 267a7e743c7c4973345ceaeae71cae1f, disabling compactions & flushes 2024-12-03T15:20:30,479 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,479 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,479 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. after waiting 0 ms 2024-12-03T15:20:30,479 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,479 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,479 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:30,482 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:20:30,482 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239230482"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239230482"}]},"ts":"1733239230482"} 2024-12-03T15:20:30,486 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
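Note: the create request logged above declares TestAcidGuarantees with hbase.hregion.compacting.memstore.type => 'ADAPTIVE' and three single-version families A, B and C, and the earlier TableDescriptorChecker WARN suggests a deliberately tiny 131072-byte memstore flush size to provoke frequent flushes. A rough sketch of an equivalent client-side descriptor, using the standard 2.x builder API (whether the flush size really came from the descriptor rather than hbase.hregion.memstore.flush.size is not visible here):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesSketch {
        public static void main(String[] args) throws Exception {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // ADAPTIVE in-memory compaction, as in the logged TABLE_ATTRIBUTES metadata
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                    // 128 KB flush size (assumption: the value behind the sanity-check WARN)
                    .setMemStoreFlushSize(131072);
            for (String family : new String[] { "A", "B", "C" }) {
                table.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                        .build());
            }
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(table.build());
            }
        }
    }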
2024-12-03T15:20:30,487 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:20:30,488 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239230487"}]},"ts":"1733239230487"} 2024-12-03T15:20:30,491 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:20:30,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, ASSIGN}] 2024-12-03T15:20:30,504 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, ASSIGN 2024-12-03T15:20:30,506 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:20:30,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T15:20:30,657 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=267a7e743c7c4973345ceaeae71cae1f, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:30,661 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:30,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T15:20:30,816 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:30,823 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:30,824 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:30,824 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,824 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:30,824 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,824 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,827 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,832 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:30,832 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 267a7e743c7c4973345ceaeae71cae1f columnFamilyName A 2024-12-03T15:20:30,833 DEBUG [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:30,834 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(327): Store=267a7e743c7c4973345ceaeae71cae1f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:30,834 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,837 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:30,837 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 267a7e743c7c4973345ceaeae71cae1f columnFamilyName B 2024-12-03T15:20:30,838 DEBUG [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:30,839 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(327): Store=267a7e743c7c4973345ceaeae71cae1f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:30,839 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,841 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:30,842 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 267a7e743c7c4973345ceaeae71cae1f columnFamilyName C 2024-12-03T15:20:30,842 DEBUG [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:30,843 INFO [StoreOpener-267a7e743c7c4973345ceaeae71cae1f-1 {}] regionserver.HStore(327): Store=267a7e743c7c4973345ceaeae71cae1f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:30,843 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,845 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,846 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,848 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:20:30,851 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:30,855 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:20:30,856 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 267a7e743c7c4973345ceaeae71cae1f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71982055, jitterRate=0.07261620461940765}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:20:30,858 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:30,860 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., pid=11, masterSystemTime=1733239230816 2024-12-03T15:20:30,864 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:30,864 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
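Note: the FlushLargeStoresPolicy line above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the policy falls back to region.getMemStoreFlushHeapSize divided by the number of families (16.0 M). The log's wording implies a fixed lower bound could instead be carried as table-descriptor metadata; a small, hypothetical sketch (the 8 MB value is illustrative only):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
        public static void main(String[] args) {
            // Hypothetical: pin the per-column-family flush lower bound in the descriptor
            // instead of letting FlushLargeStoresPolicy derive a per-family default.
            TableDescriptor desc =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(8L * 1024 * 1024))
                    .build();
            System.out.println(desc.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
        }
    }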
2024-12-03T15:20:30,865 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=267a7e743c7c4973345ceaeae71cae1f, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:30,873 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-03T15:20:30,875 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 in 208 msec 2024-12-03T15:20:30,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-03T15:20:30,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, ASSIGN in 372 msec 2024-12-03T15:20:30,878 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:20:30,878 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239230878"}]},"ts":"1733239230878"} 2024-12-03T15:20:30,881 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:20:30,885 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:20:30,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 510 msec 2024-12-03T15:20:31,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-03T15:20:31,038 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-03T15:20:31,043 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-12-03T15:20:31,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,049 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,052 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,055 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:20:31,057 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55144, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:20:31,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-03T15:20:31,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,069 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-12-03T15:20:31,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,074 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-03T15:20:31,078 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,079 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-12-03T15:20:31,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,083 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-12-03T15:20:31,086 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,088 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-03T15:20:31,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,093 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-12-03T15:20:31,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,099 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-03T15:20:31,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,104 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-12-03T15:20:31,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:31,114 DEBUG [hconnection-0x4360c845-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,114 DEBUG [hconnection-0x703098f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,114 DEBUG [hconnection-0x64ccd404-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,115 DEBUG [hconnection-0x4872f2f9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,115 DEBUG [hconnection-0x42186a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,116 DEBUG [hconnection-0x53f1d8ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,117 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,118 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,120 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-03T15:20:31,122 DEBUG [hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:31,124 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,126 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,127 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,128 DEBUG [hconnection-0x2dd15ceb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:31,130 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,130 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,131 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41178, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-03T15:20:31,136 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:31,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:31,137 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:31,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:31,143 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:31,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:31,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:20:31,237 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:31,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:31,239 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:31,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:31,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:31,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:31,241 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:31,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:31,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:31,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:31,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c4101d21d5f943e0bfafab6200e58ee6 is 50, key is test_row_0/A:col10/1733239231195/Put/seqid=0 2024-12-03T15:20:31,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:31,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741839_1015 (size=16681) 2024-12-03T15:20:31,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c4101d21d5f943e0bfafab6200e58ee6 2024-12-03T15:20:31,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239291412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239291427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239291465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239291467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239291467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:31,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:31,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239291606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239291607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b7821a6a4cd44a029917cec60abc90e0 is 50, key is test_row_0/B:col10/1733239231195/Put/seqid=0 2024-12-03T15:20:31,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239291608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239291610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239291610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741840_1016 (size=12001) 2024-12-03T15:20:31,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:31,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:31,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,679 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:31,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:31,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239291817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239291819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239291821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239291824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:31,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239291823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,833 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:31,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,995 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:31,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:31,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:31,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:32,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b7821a6a4cd44a029917cec60abc90e0 2024-12-03T15:20:32,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/69ba7f67583f404f82736c01dadcb4d6 is 50, key is test_row_0/C:col10/1733239231195/Put/seqid=0 2024-12-03T15:20:32,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239292127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239292129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239292129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239292129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239292129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741841_1017 (size=12001) 2024-12-03T15:20:32,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:32,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:32,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:32,315 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:32,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:32,471 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:32,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:32,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/69ba7f67583f404f82736c01dadcb4d6 2024-12-03T15:20:32,628 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:32,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:32,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:32,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239292636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239292639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239292642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239292645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c4101d21d5f943e0bfafab6200e58ee6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6 2024-12-03T15:20:32,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:32,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239292650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6, entries=250, sequenceid=14, filesize=16.3 K 2024-12-03T15:20:32,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b7821a6a4cd44a029917cec60abc90e0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0 2024-12-03T15:20:32,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0, entries=150, sequenceid=14, filesize=11.7 K 2024-12-03T15:20:32,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/69ba7f67583f404f82736c01dadcb4d6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6 2024-12-03T15:20:32,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:32,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:32,794 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:32,807 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6, entries=150, sequenceid=14, filesize=11.7 K 2024-12-03T15:20:32,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 267a7e743c7c4973345ceaeae71cae1f in 1593ms, sequenceid=14, compaction requested=false 2024-12-03T15:20:32,817 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-03T15:20:32,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:32,954 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:32,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-03T15:20:32,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:32,956 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:20:32,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:32,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:32,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:32,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:32,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:32,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:32,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e98fcb4198d444a29c361defc3edd987 is 50, key is test_row_0/A:col10/1733239231451/Put/seqid=0 2024-12-03T15:20:33,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741842_1018 (size=12001) 2024-12-03T15:20:33,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:33,412 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e98fcb4198d444a29c361defc3edd987 2024-12-03T15:20:33,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ad67661e41534bbc883c193d9380c877 is 50, key is test_row_0/B:col10/1733239231451/Put/seqid=0 2024-12-03T15:20:33,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741843_1019 (size=12001) 2024-12-03T15:20:33,506 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ad67661e41534bbc883c193d9380c877 2024-12-03T15:20:33,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/931a88467b9147678a223b2f3720cb31 is 50, key is test_row_0/C:col10/1733239231451/Put/seqid=0 2024-12-03T15:20:33,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741844_1020 (size=12001) 2024-12-03T15:20:33,619 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/931a88467b9147678a223b2f3720cb31 2024-12-03T15:20:33,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e98fcb4198d444a29c361defc3edd987 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987 2024-12-03T15:20:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:33,654 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987, entries=150, sequenceid=39, filesize=11.7 K 2024-12-03T15:20:33,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:33,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ad67661e41534bbc883c193d9380c877 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877 2024-12-03T15:20:33,672 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877, entries=150, sequenceid=39, filesize=11.7 K 2024-12-03T15:20:33,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/931a88467b9147678a223b2f3720cb31 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31 2024-12-03T15:20:33,690 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31, entries=150, sequenceid=39, filesize=11.7 K 2024-12-03T15:20:33,692 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=33.54 KB/34350 for 267a7e743c7c4973345ceaeae71cae1f in 736ms, sequenceid=39, compaction requested=false 2024-12-03T15:20:33,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:33,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:33,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-03T15:20:33,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-03T15:20:33,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-03T15:20:33,710 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5670 sec 2024-12-03T15:20:33,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:33,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:20:33,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:33,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:33,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:33,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:33,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:33,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:33,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.5870 sec 2024-12-03T15:20:33,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/49d2d662a3f94a4891e5f268c13787f0 is 50, key is test_row_0/A:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741845_1021 (size=16681) 2024-12-03T15:20:33,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/49d2d662a3f94a4891e5f268c13787f0 2024-12-03T15:20:33,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/bc338f05a2a740b598d9af5a0e2f31f6 is 50, key is test_row_0/B:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:33,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741846_1022 
(size=12001) 2024-12-03T15:20:33,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/bc338f05a2a740b598d9af5a0e2f31f6 2024-12-03T15:20:33,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a9169c751e714070a17a2789a0ea8048 is 50, key is test_row_0/C:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:33,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:33,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239293861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:33,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741847_1023 (size=12001) 2024-12-03T15:20:33,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239293872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:33,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:33,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a9169c751e714070a17a2789a0ea8048 2024-12-03T15:20:33,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239293874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:33,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239293880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:33,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:33,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239293881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:33,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/49d2d662a3f94a4891e5f268c13787f0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0 2024-12-03T15:20:33,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0, entries=250, sequenceid=50, filesize=16.3 K 2024-12-03T15:20:33,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/bc338f05a2a740b598d9af5a0e2f31f6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6 2024-12-03T15:20:33,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6, entries=150, sequenceid=50, filesize=11.7 K 2024-12-03T15:20:34,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a9169c751e714070a17a2789a0ea8048 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048 2024-12-03T15:20:34,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048, entries=150, sequenceid=50, filesize=11.7 K 2024-12-03T15:20:34,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 267a7e743c7c4973345ceaeae71cae1f in 316ms, sequenceid=50, compaction requested=true 2024-12-03T15:20:34,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:34,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:34,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:34,042 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:34,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-03T15:20:34,045 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:34,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:34,048 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:34,049 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:34,050 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:34,050 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.2 K 2024-12-03T15:20:34,052 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b7821a6a4cd44a029917cec60abc90e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239231195 2024-12-03T15:20:34,053 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ad67661e41534bbc883c193d9380c877, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733239231405 2024-12-03T15:20:34,055 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting bc338f05a2a740b598d9af5a0e2f31f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233669 2024-12-03T15:20:34,055 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45363 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:34,055 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:34,055 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:34,056 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=44.3 K 2024-12-03T15:20:34,057 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4101d21d5f943e0bfafab6200e58ee6, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239231177 2024-12-03T15:20:34,057 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e98fcb4198d444a29c361defc3edd987, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733239231405 2024-12-03T15:20:34,059 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49d2d662a3f94a4891e5f268c13787f0, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233665 2024-12-03T15:20:34,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/58a1b04ed4ea4ff6a571ec53c477ab1b is 50, key is test_row_0/A:col10/1733239234040/Put/seqid=0 2024-12-03T15:20:34,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239294060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239294064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239294080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239294084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239294084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,110 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#10 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:34,111 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8ee0aa2ffb6543aba188fb648c52035e is 50, key is test_row_0/A:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:34,116 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#11 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:34,117 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/24a366f2ea334141b243fe264614e7d5 is 50, key is test_row_0/B:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:34,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741848_1024 (size=12001) 2024-12-03T15:20:34,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/58a1b04ed4ea4ff6a571ec53c477ab1b 2024-12-03T15:20:34,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741849_1025 (size=12104) 2024-12-03T15:20:34,163 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8ee0aa2ffb6543aba188fb648c52035e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8ee0aa2ffb6543aba188fb648c52035e 2024-12-03T15:20:34,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/131759d83bc146738c13d55a653d9af1 is 50, key is test_row_0/B:col10/1733239234040/Put/seqid=0 2024-12-03T15:20:34,190 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 8ee0aa2ffb6543aba188fb648c52035e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:34,190 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:34,190 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239234031; duration=0sec 2024-12-03T15:20:34,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741850_1026 (size=12104) 2024-12-03T15:20:34,193 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:34,193 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:34,193 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:34,196 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:34,196 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:34,196 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:34,197 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.2 K 2024-12-03T15:20:34,198 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69ba7f67583f404f82736c01dadcb4d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239231195 2024-12-03T15:20:34,200 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 931a88467b9147678a223b2f3720cb31, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733239231405 2024-12-03T15:20:34,201 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9169c751e714070a17a2789a0ea8048, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233669 2024-12-03T15:20:34,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741851_1027 (size=12001) 2024-12-03T15:20:34,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239294187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239294187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239294189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239294194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239294195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,230 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#13 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:34,231 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/5a82592f42f44320a1147458cba93af4 is 50, key is test_row_0/C:col10/1733239233669/Put/seqid=0 2024-12-03T15:20:34,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741852_1028 (size=12104) 2024-12-03T15:20:34,263 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/5a82592f42f44320a1147458cba93af4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5a82592f42f44320a1147458cba93af4 2024-12-03T15:20:34,276 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 5a82592f42f44320a1147458cba93af4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
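The repeated RegionTooBusyException warnings above are the region server applying write back-pressure: HRegion.checkResources rejects incoming Mutate calls once the region's memstore passes its blocking limit (512.0 K here), and the clients at 172.17.0.2 keep resubmitting. What follows is a minimal sketch, in Java against the standard HBase client API, of the kind of single-row put traffic that produces these rejections, plus an application-level backoff for the case where the client's own retries give up. The table name TestAcidGuarantees, the row key test_row_0, the qualifier col10 and the families A/B/C come from the log; the retry count and sleep times are illustrative assumptions, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));   // row key seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      // The HBase client retries RegionTooBusyException internally; this outer loop
      // only adds an application-level backoff for the case where those retries are
      // exhausted and the failure surfaces to the caller as an IOException.
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          if (++attempts >= 5) {
            throw e;                     // give up after a few attempts (illustrative)
          }
          Thread.sleep(200L * attempts); // crude linear backoff (illustrative)
        }
      }
    }
  }
}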
2024-12-03T15:20:34,276 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:34,276 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239234042; duration=0sec 2024-12-03T15:20:34,277 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:34,277 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:34,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239294411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239294413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239294414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239294416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239294419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/131759d83bc146738c13d55a653d9af1 2024-12-03T15:20:34,622 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T15:20:34,629 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/24a366f2ea334141b243fe264614e7d5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/24a366f2ea334141b243fe264614e7d5 2024-12-03T15:20:34,655 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 24a366f2ea334141b243fe264614e7d5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
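The flush entries above (58.14 KB flushed per family at sequenceid=79) together with the 512.0 K figure in the rejections reflect how the blocking threshold is derived: HRegion blocks updates once the per-region memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and this test evidently configures those values far below production scale. The sketch below shows one combination that would yield a 512 K limit; the actual values used by the test are not visible in this excerpt, so the concrete numbers are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit printed in the log = flush size * block multiplier.
    // 128 KB * 4 = 512 KB would match "Over memstore limit=512.0 K", but these
    // particular values are an assumption, not taken from the test's source.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}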
2024-12-03T15:20:34,655 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:34,655 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239234042; duration=0sec 2024-12-03T15:20:34,656 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:34,656 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:34,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a50ee0a0ddf4486dbc9ade7100a94e4b is 50, key is test_row_0/C:col10/1733239234040/Put/seqid=0 2024-12-03T15:20:34,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741853_1029 (size=12001) 2024-12-03T15:20:34,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a50ee0a0ddf4486dbc9ade7100a94e4b 2024-12-03T15:20:34,694 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-03T15:20:34,700 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-03T15:20:34,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/58a1b04ed4ea4ff6a571ec53c477ab1b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b 2024-12-03T15:20:34,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b, entries=150, sequenceid=79, filesize=11.7 K 2024-12-03T15:20:34,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239294722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/131759d83bc146738c13d55a653d9af1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1 2024-12-03T15:20:34,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239294720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239294723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239294722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239294735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:34,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1, entries=150, sequenceid=79, filesize=11.7 K 2024-12-03T15:20:34,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a50ee0a0ddf4486dbc9ade7100a94e4b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b 2024-12-03T15:20:34,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b, entries=150, sequenceid=79, filesize=11.7 K 2024-12-03T15:20:34,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 267a7e743c7c4973345ceaeae71cae1f in 717ms, sequenceid=79, compaction requested=false 2024-12-03T15:20:34,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:35,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:20:35,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:35,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:35,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:35,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:35,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:35,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-12-03T15:20:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:35,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-03T15:20:35,256 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-03T15:20:35,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:35,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-03T15:20:35,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-03T15:20:35,264 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:35,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab is 50, key is test_row_0/A:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:35,266 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:35,267 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:35,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741854_1030 (size=16681) 2024-12-03T15:20:35,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239295300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239295306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239295306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239295309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239295310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-03T15:20:35,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239295414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239295415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,422 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-03T15:20:35,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
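The entries around 15:20:35 show a client-requested flush of TestAcidGuarantees: the master stores FlushTableProcedure pid=14 and dispatches FlushRegionProcedure pid=15 to the region server; as the entries that follow show, the server refuses while the region is still flushing on its own, and the master re-dispatches pid=15. A hedged sketch of how such a flush is typically requested through the Admin API follows; apart from the table name, the details are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush all regions of the table; in the log this appears as
      // FlushTableProcedure (pid=14) plus a per-region FlushRegionProcedure (pid=15)
      // that is re-dispatched while the region is still busy with its own flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}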
2024-12-03T15:20:35,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:35,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239295419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239295417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239295420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-03T15:20:35,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-03T15:20:35,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:35,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,580 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239295620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239295626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239295627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239295628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239295629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab 2024-12-03T15:20:35,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-03T15:20:35,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:35,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:35,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ba26e9db5404449396de768cd80f24d3 is 50, key is test_row_0/B:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:35,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741855_1031 (size=12001) 2024-12-03T15:20:35,811 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ba26e9db5404449396de768cd80f24d3 2024-12-03T15:20:35,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3ce0242b6c5d4eb0bafd6620ff86d605 is 50, key is test_row_0/C:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:35,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-03T15:20:35,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741856_1032 (size=12001) 2024-12-03T15:20:35,896 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-03T15:20:35,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:35,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:35,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:35,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3ce0242b6c5d4eb0bafd6620ff86d605 2024-12-03T15:20:35,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239295926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab 2024-12-03T15:20:35,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239295938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239295939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239295939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:35,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239295939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:35,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab, entries=250, sequenceid=94, filesize=16.3 K 2024-12-03T15:20:35,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ba26e9db5404449396de768cd80f24d3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3 2024-12-03T15:20:35,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3, entries=150, sequenceid=94, filesize=11.7 K 2024-12-03T15:20:35,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3ce0242b6c5d4eb0bafd6620ff86d605 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605 2024-12-03T15:20:35,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605, entries=150, sequenceid=94, filesize=11.7 K 2024-12-03T15:20:36,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 267a7e743c7c4973345ceaeae71cae1f in 756ms, sequenceid=94, compaction requested=true 2024-12-03T15:20:36,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:36,001 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:36,001 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:36,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:36,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:36,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:36,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:36,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:36,002 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:36,004 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:36,004 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:36,004 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:36,004 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8ee0aa2ffb6543aba188fb648c52035e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=39.8 K 2024-12-03T15:20:36,005 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:36,005 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:36,005 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:36,006 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/24a366f2ea334141b243fe264614e7d5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.3 K 2024-12-03T15:20:36,006 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ee0aa2ffb6543aba188fb648c52035e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233669 2024-12-03T15:20:36,008 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 58a1b04ed4ea4ff6a571ec53c477ab1b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733239233871 2024-12-03T15:20:36,008 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24a366f2ea334141b243fe264614e7d5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233669 2024-12-03T15:20:36,010 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b8a9e58ca1d4b89a1a5e07ae6d440ab, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:36,010 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 131759d83bc146738c13d55a653d9af1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733239233871 2024-12-03T15:20:36,011 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba26e9db5404449396de768cd80f24d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:36,033 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:36,034 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2ad0c6f83df446f0b256eeed7de2f41a is 50, key is test_row_0/A:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:36,042 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:36,043 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/e16b7a1fb3434d0f9b5d03270cb8e27c is 50, key is test_row_0/B:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:36,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741857_1033 (size=12207) 2024-12-03T15:20:36,053 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,053 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-03T15:20:36,054 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-03T15:20:36,057 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-03T15:20:36,057 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-03T15:20:36,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-03T15:20:36,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:36,059 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:20:36,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:36,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:36,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:36,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T15:20:36,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-03T15:20:36,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-03T15:20:36,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-03T15:20:36,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:20:36,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-03T15:20:36,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741858_1034 (size=12207) 2024-12-03T15:20:36,074 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2ad0c6f83df446f0b256eeed7de2f41a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2ad0c6f83df446f0b256eeed7de2f41a 
2024-12-03T15:20:36,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/cd4ff8bc00ee487e836e28f64c6084e2 is 50, key is test_row_0/A:col10/1733239235306/Put/seqid=0 2024-12-03T15:20:36,090 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/e16b7a1fb3434d0f9b5d03270cb8e27c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/e16b7a1fb3434d0f9b5d03270cb8e27c 2024-12-03T15:20:36,106 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 2ad0c6f83df446f0b256eeed7de2f41a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:36,106 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:36,106 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239236001; duration=0sec 2024-12-03T15:20:36,107 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:36,107 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:36,107 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:36,109 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into e16b7a1fb3434d0f9b5d03270cb8e27c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
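The SortedCompactionPolicy line above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") reflects the store-file selection thresholds in force on the region server. The sketch below sets the configuration keys that govern that selection; the values shown are the commonly documented defaults and are assumptions about this run, not values read from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum store files before a minor compaction is selected
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in a single compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring selection algorithm
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // writes are delayed once a store accumulates this many files
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}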
2024-12-03T15:20:36,109 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:36,109 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239236001; duration=0sec 2024-12-03T15:20:36,110 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:36,110 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:36,110 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:36,110 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:36,110 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:36,110 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5a82592f42f44320a1147458cba93af4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.3 K 2024-12-03T15:20:36,111 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a82592f42f44320a1147458cba93af4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733239233669 2024-12-03T15:20:36,112 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting a50ee0a0ddf4486dbc9ade7100a94e4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733239233871 2024-12-03T15:20:36,113 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ce0242b6c5d4eb0bafd6620ff86d605, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:36,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741859_1035 (size=12001) 2024-12-03T15:20:36,126 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/cd4ff8bc00ee487e836e28f64c6084e2 2024-12-03T15:20:36,131 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:36,132 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/4b9c927e04e647e386693bde58c65a1f is 50, key is test_row_0/C:col10/1733239235241/Put/seqid=0 2024-12-03T15:20:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741860_1036 (size=12207) 2024-12-03T15:20:36,188 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/4b9c927e04e647e386693bde58c65a1f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4b9c927e04e647e386693bde58c65a1f 2024-12-03T15:20:36,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/94b9dba8d2cf48a8b1248bedf3eaf2ec is 50, key is test_row_0/B:col10/1733239235306/Put/seqid=0 2024-12-03T15:20:36,202 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 4b9c927e04e647e386693bde58c65a1f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
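The store file keys above all reference row test_row_0 with qualifier col10 in families A, B and C. A minimal read-back sketch for those cells follows, assuming a reachable cluster; the expectation that the row is present is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Ask for A:col10, B:col10 and C:col10 of the row named in the log's HFile keys.
      Get get = new Get(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        get.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"));
      }
      Result result = table.get(get);
      for (String family : new String[] {"A", "B", "C"}) {
        byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes("col10"));
        System.out.println(family + ":col10 = " + (value == null ? "<missing>" : Bytes.toStringBinary(value)));
      }
    }
  }
}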
2024-12-03T15:20:36,228 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:36,228 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239236001; duration=0sec 2024-12-03T15:20:36,228 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:36,228 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741861_1037 (size=12001) 2024-12-03T15:20:36,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-03T15:20:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:36,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:36,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239296451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239296453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239296453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239296455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239296455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239296559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239296563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239296564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239296564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239296564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,636 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/94b9dba8d2cf48a8b1248bedf3eaf2ec 2024-12-03T15:20:36,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1937783b4a1e4de7b4b76f45013e6cf6 is 50, key is test_row_0/C:col10/1733239235306/Put/seqid=0 2024-12-03T15:20:36,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741862_1038 (size=12001) 2024-12-03T15:20:36,678 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1937783b4a1e4de7b4b76f45013e6cf6 2024-12-03T15:20:36,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/cd4ff8bc00ee487e836e28f64c6084e2 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2 2024-12-03T15:20:36,703 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2, entries=150, sequenceid=118, filesize=11.7 K 2024-12-03T15:20:36,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/94b9dba8d2cf48a8b1248bedf3eaf2ec as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec 2024-12-03T15:20:36,720 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec, entries=150, sequenceid=118, filesize=11.7 K 2024-12-03T15:20:36,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1937783b4a1e4de7b4b76f45013e6cf6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6 2024-12-03T15:20:36,736 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6, entries=150, sequenceid=118, filesize=11.7 K 2024-12-03T15:20:36,738 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 267a7e743c7c4973345ceaeae71cae1f in 679ms, sequenceid=118, compaction requested=false 2024-12-03T15:20:36,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:36,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
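The burst of RegionTooBusyException warnings earlier in this window comes from writers hitting the region's memstore blocking limit while the flush above was still in flight. Below is a hedged sketch of one way an application-level writer might back off on that exception; in practice the stock HBase client also retries such transient failures internally (possibly surfacing them as a RetriesExhaustedException), and the payload, attempt count and sleep bounds here are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          return;                                                   // write accepted
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          Thread.sleep(backoffMs);                                  // give the in-flight flush time to finish
          backoffMs = Math.min(backoffMs * 2, 5000);                // exponential backoff with a ceiling
        }
      }
      throw new IllegalStateException("region stayed too busy after repeated attempts");
    }
  }
}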
2024-12-03T15:20:36,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-03T15:20:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-03T15:20:36,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-03T15:20:36,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4750 sec 2024-12-03T15:20:36,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.4900 sec 2024-12-03T15:20:36,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:20:36,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:36,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:36,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/90cbde9c669d4988818b3d58b78f6760 is 50, key is test_row_0/A:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:36,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239296792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239296792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239296795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239296794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239296797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741863_1039 (size=12151) 2024-12-03T15:20:36,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239296902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239296902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239296904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239296904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:36,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239296904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239297107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239297110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239297108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239297110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239297109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/90cbde9c669d4988818b3d58b78f6760 2024-12-03T15:20:37,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/f7c1cfc8d7c74795841a4daeebaea929 is 50, key is test_row_0/B:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:37,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741864_1040 (size=12151) 2024-12-03T15:20:37,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/f7c1cfc8d7c74795841a4daeebaea929 2024-12-03T15:20:37,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/8fcf87a0493449cc84b51fb301c69b7b is 50, key is test_row_0/C:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 
2024-12-03T15:20:37,373 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-03T15:20:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741865_1041 (size=12151) 2024-12-03T15:20:37,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:37,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-03T15:20:37,381 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:37,382 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:37,383 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:37,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:37,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239297413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239297417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239297418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239297419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239297419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:37,536 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-03T15:20:37,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:37,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,537 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:37,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-03T15:20:37,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:37,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:37,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/8fcf87a0493449cc84b51fb301c69b7b 2024-12-03T15:20:37,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/90cbde9c669d4988818b3d58b78f6760 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760 2024-12-03T15:20:37,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760, entries=150, sequenceid=136, filesize=11.9 K 2024-12-03T15:20:37,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/f7c1cfc8d7c74795841a4daeebaea929 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929 2024-12-03T15:20:37,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929, entries=150, 
sequenceid=136, filesize=11.9 K 2024-12-03T15:20:37,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/8fcf87a0493449cc84b51fb301c69b7b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b 2024-12-03T15:20:37,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b, entries=150, sequenceid=136, filesize=11.9 K 2024-12-03T15:20:37,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 267a7e743c7c4973345ceaeae71cae1f in 1085ms, sequenceid=136, compaction requested=true 2024-12-03T15:20:37,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:37,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:37,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:37,854 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:37,854 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:37,854 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:37,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:37,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:37,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:37,856 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:37,856 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:37,856 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in 
TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,856 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2ad0c6f83df446f0b256eeed7de2f41a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.5 K 2024-12-03T15:20:37,856 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:37,857 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:37,857 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,857 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/e16b7a1fb3434d0f9b5d03270cb8e27c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.5 K 2024-12-03T15:20:37,857 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ad0c6f83df446f0b256eeed7de2f41a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:37,857 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e16b7a1fb3434d0f9b5d03270cb8e27c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:37,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-03T15:20:37,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:37,859 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:20:37,859 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 94b9dba8d2cf48a8b1248bedf3eaf2ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733239235302 2024-12-03T15:20:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:37,859 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd4ff8bc00ee487e836e28f64c6084e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733239235302 2024-12-03T15:20:37,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:37,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:37,860 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f7c1cfc8d7c74795841a4daeebaea929, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:37,860 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90cbde9c669d4988818b3d58b78f6760, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:37,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/7b37b8a1f07543e7a339ad4e313200ea is 50, key is test_row_0/A:col10/1733239236792/Put/seqid=0 2024-12-03T15:20:37,905 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#29 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:37,905 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/9279ea2d9f5a41f0b0f17e350832e8dd is 50, key is test_row_0/B:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:37,912 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#28 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:37,913 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/fb9b991d09854e428d0b2ab43e995c94 is 50, key is test_row_0/A:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:37,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:37,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:37,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741866_1042 (size=12151) 2024-12-03T15:20:37,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239297946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,955 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239297947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239297952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239297949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:37,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239297953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:37,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741867_1043 (size=12459) 2024-12-03T15:20:37,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741868_1044 (size=12459) 2024-12-03T15:20:37,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:38,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239298059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,062 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239298059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239298060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239298060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239298060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239298264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239298264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239298265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239298266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239298267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,330 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/7b37b8a1f07543e7a339ad4e313200ea 2024-12-03T15:20:38,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b8d4d3d815974629b77fb0bbecb3a3d7 is 50, key is test_row_0/B:col10/1733239236792/Put/seqid=0 2024-12-03T15:20:38,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741869_1045 (size=12151) 2024-12-03T15:20:38,382 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/9279ea2d9f5a41f0b0f17e350832e8dd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9279ea2d9f5a41f0b0f17e350832e8dd 2024-12-03T15:20:38,389 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/fb9b991d09854e428d0b2ab43e995c94 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/fb9b991d09854e428d0b2ab43e995c94 2024-12-03T15:20:38,400 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 9279ea2d9f5a41f0b0f17e350832e8dd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:38,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:38,400 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239237854; duration=0sec 2024-12-03T15:20:38,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:38,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:38,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:38,403 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:38,403 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:38,403 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:38,404 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4b9c927e04e647e386693bde58c65a1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=35.5 K 2024-12-03T15:20:38,405 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b9c927e04e647e386693bde58c65a1f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733239234080 2024-12-03T15:20:38,406 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 1937783b4a1e4de7b4b76f45013e6cf6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733239235302 2024-12-03T15:20:38,407 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fcf87a0493449cc84b51fb301c69b7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:38,407 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into fb9b991d09854e428d0b2ab43e995c94(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:38,408 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:38,408 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239237853; duration=0sec 2024-12-03T15:20:38,408 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:38,408 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:38,432 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:38,433 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/9967f54d85c84d60ac5723c758a31d1a is 50, key is test_row_0/C:col10/1733239236447/Put/seqid=0 2024-12-03T15:20:38,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741870_1046 (size=12459) 2024-12-03T15:20:38,474 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/9967f54d85c84d60ac5723c758a31d1a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9967f54d85c84d60ac5723c758a31d1a 2024-12-03T15:20:38,487 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 9967f54d85c84d60ac5723c758a31d1a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:38,488 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:38,488 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239237855; duration=0sec 2024-12-03T15:20:38,488 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:38,488 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:38,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239298573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239298574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239298574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239298582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:38,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239298575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:38,763 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b8d4d3d815974629b77fb0bbecb3a3d7 2024-12-03T15:20:38,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a822c0b37cc24849b60785fd8f4b9383 is 50, key is test_row_0/C:col10/1733239236792/Put/seqid=0 2024-12-03T15:20:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741871_1047 (size=12151) 2024-12-03T15:20:39,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239299077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239299079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239299082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239299091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239299095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,214 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a822c0b37cc24849b60785fd8f4b9383 2024-12-03T15:20:39,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/7b37b8a1f07543e7a339ad4e313200ea as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea 2024-12-03T15:20:39,243 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea, entries=150, sequenceid=157, filesize=11.9 K 2024-12-03T15:20:39,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b8d4d3d815974629b77fb0bbecb3a3d7 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7 2024-12-03T15:20:39,258 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7, entries=150, sequenceid=157, filesize=11.9 K 2024-12-03T15:20:39,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a822c0b37cc24849b60785fd8f4b9383 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383 2024-12-03T15:20:39,271 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383, entries=150, sequenceid=157, filesize=11.9 K 2024-12-03T15:20:39,274 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 267a7e743c7c4973345ceaeae71cae1f in 1414ms, sequenceid=157, compaction requested=false 2024-12-03T15:20:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:39,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-03T15:20:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-03T15:20:39,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-03T15:20:39,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8930 sec 2024-12-03T15:20:39,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.9010 sec 2024-12-03T15:20:39,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-03T15:20:39,497 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-03T15:20:39,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:39,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-03T15:20:39,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-03T15:20:39,503 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:39,504 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:39,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-03T15:20:39,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:39,657 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-03T15:20:39,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:39,658 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:39,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:39,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/225299190056470bb36156a7cf264b75 is 50, key is test_row_0/A:col10/1733239237937/Put/seqid=0 2024-12-03T15:20:39,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741872_1048 (size=12151) 2024-12-03T15:20:39,697 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/225299190056470bb36156a7cf264b75 2024-12-03T15:20:39,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/6fda85e561e047cb9a5da5a5387ec851 is 50, key is test_row_0/B:col10/1733239237937/Put/seqid=0 2024-12-03T15:20:39,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741873_1049 (size=12151) 2024-12-03T15:20:39,748 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/6fda85e561e047cb9a5da5a5387ec851 2024-12-03T15:20:39,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/6c431344777f416baaa1bcecc62c817e is 50, key is test_row_0/C:col10/1733239237937/Put/seqid=0 2024-12-03T15:20:39,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741874_1050 (size=12151) 2024-12-03T15:20:39,789 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/6c431344777f416baaa1bcecc62c817e 2024-12-03T15:20:39,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/225299190056470bb36156a7cf264b75 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75 2024-12-03T15:20:39,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-03T15:20:39,807 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75, entries=150, sequenceid=175, filesize=11.9 K 2024-12-03T15:20:39,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/6fda85e561e047cb9a5da5a5387ec851 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851 2024-12-03T15:20:39,818 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851, entries=150, sequenceid=175, filesize=11.9 K 2024-12-03T15:20:39,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/6c431344777f416baaa1bcecc62c817e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e 2024-12-03T15:20:39,828 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e, entries=150, sequenceid=175, filesize=11.9 K 2024-12-03T15:20:39,830 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 267a7e743c7c4973345ceaeae71cae1f in 172ms, sequenceid=175, compaction requested=true 2024-12-03T15:20:39,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:39,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:39,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-03T15:20:39,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-03T15:20:39,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-03T15:20:39,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 328 msec 2024-12-03T15:20:39,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 335 msec 2024-12-03T15:20:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-03T15:20:40,114 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-03T15:20:40,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-03T15:20:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:40,122 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:40,124 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:40,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:40,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:20:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:40,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:40,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:40,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:40,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:40,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/080d351955b742f69eae3a4efbdb8556 is 50, key is test_row_0/A:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:40,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741875_1051 (size=12151) 2024-12-03T15:20:40,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239300190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239300198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239300191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239300199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239300214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:40,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:40,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:40,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239300307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239300310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239300310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239300314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239300322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:40,458 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,459 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239300512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239300518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239300518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239300523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239300526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/080d351955b742f69eae3a4efbdb8556 2024-12-03T15:20:40,614 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
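The repeated RegionTooBusyException warnings above are the region blocking writes because its memstore has crossed the blocking limit (memstore flush size multiplied by the block multiplier); they clear once the in-flight flush drains the memstore, which matches the later entries where MemStoreFlusher.0 finishes writing the A/B/C store files. As a hedged illustration only, the sketch below shows the two configuration keys that can produce a 512 K blocking limit; the concrete 128 K / 4 split is an assumption for illustration, not the actual TestAcidGuarantees settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch: how a 512 K blocking limit can arise. The values below are
    // assumptions; the test's real configuration is not visible in this log.
    public final class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush size
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // assumed multiplier of 4
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // 128 K * 4 = 512 K: while the memstore is above this size, mutations are rejected
            // with RegionTooBusyException until a flush brings it back under the limit.
            System.out.println("Blocking limit in bytes: " + blockingLimit);
        }
    }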
2024-12-03T15:20:40,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/cf97da9887d84d7296bfb3871b662fa9 is 50, key is test_row_0/B:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:40,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741876_1052 (size=12151) 2024-12-03T15:20:40,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/cf97da9887d84d7296bfb3871b662fa9 2024-12-03T15:20:40,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/9da6204b07794f04b82330942d4159f7 is 50, key is test_row_0/C:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:40,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741877_1053 (size=12151) 2024-12-03T15:20:40,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:40,774 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:40,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:40,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
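The pid=21 failures above ("Unable to complete flush ... as already flushing") occur because the FlushRegionProcedure keeps being dispatched while MemStoreFlusher.0 still owns the region's flush; the master re-dispatches the remote procedure until it can run. For context, a minimal sketch of the client call that drives such a FLUSH operation, assuming the standard Admin API and a hypothetical standalone connection rather than the test's mini-cluster wiring:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Minimal sketch, not the test's actual code: request a table flush and wait for the
    // master-side FlushTableProcedure (and its FlushRegionProcedure subprocedures) to finish.
    public final class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Blocks while the client polls the master, which shows up in this log as the
                // repeated "Checking to see if procedure is done" entries.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }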
2024-12-03T15:20:40,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239300824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239300826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239300826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239300833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:40,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239300834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:40,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:40,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:40,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:40,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:40,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:41,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,097 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:41,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:41,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:41,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:41,098 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:41,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:41,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:41,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=188 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/9da6204b07794f04b82330942d4159f7 2024-12-03T15:20:41,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/080d351955b742f69eae3a4efbdb8556 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556 2024-12-03T15:20:41,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556, entries=150, sequenceid=188, filesize=11.9 K 2024-12-03T15:20:41,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/cf97da9887d84d7296bfb3871b662fa9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9 2024-12-03T15:20:41,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9, entries=150, 
sequenceid=188, filesize=11.9 K 2024-12-03T15:20:41,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/9da6204b07794f04b82330942d4159f7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7 2024-12-03T15:20:41,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7, entries=150, sequenceid=188, filesize=11.9 K 2024-12-03T15:20:41,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 267a7e743c7c4973345ceaeae71cae1f in 1040ms, sequenceid=188, compaction requested=true 2024-12-03T15:20:41,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:41,166 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:41,168 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:41,168 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:20:41,169 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:41,169 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in 
TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:41,169 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/fb9b991d09854e428d0b2ab43e995c94, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=47.8 K 2024-12-03T15:20:41,170 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb9b991d09854e428d0b2ab43e995c94, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:41,170 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:20:41,170 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:41,170 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:41,170 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9279ea2d9f5a41f0b0f17e350832e8dd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=47.8 K 2024-12-03T15:20:41,170 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b37b8a1f07543e7a339ad4e313200ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239236788 2024-12-03T15:20:41,171 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9279ea2d9f5a41f0b0f17e350832e8dd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:41,171 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 225299190056470bb36156a7cf264b75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239237937 2024-12-03T15:20:41,171 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b8d4d3d815974629b77fb0bbecb3a3d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239236788 2024-12-03T15:20:41,171 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 080d351955b742f69eae3a4efbdb8556, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:41,172 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fda85e561e047cb9a5da5a5387ec851, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239237937 2024-12-03T15:20:41,173 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cf97da9887d84d7296bfb3871b662fa9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:41,190 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:41,190 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/b632e2750d034b68aa813dbdb9f0af98 is 50, key is test_row_0/A:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:41,196 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:41,197 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0fa2f5fc7b174dfc8db734746d9178f1 is 50, key is test_row_0/B:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:41,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741878_1054 (size=12595) 2024-12-03T15:20:41,225 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/b632e2750d034b68aa813dbdb9f0af98 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/b632e2750d034b68aa813dbdb9f0af98 2024-12-03T15:20:41,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741879_1055 (size=12595) 2024-12-03T15:20:41,237 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into b632e2750d034b68aa813dbdb9f0af98(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:41,237 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:41,237 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=12, startTime=1733239241166; duration=0sec 2024-12-03T15:20:41,238 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:41,241 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:41,241 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:20:41,243 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:20:41,243 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:41,243 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:41,243 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9967f54d85c84d60ac5723c758a31d1a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=47.8 K 2024-12-03T15:20:41,244 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9967f54d85c84d60ac5723c758a31d1a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733239236447 2024-12-03T15:20:41,244 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting a822c0b37cc24849b60785fd8f4b9383, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239236788 2024-12-03T15:20:41,245 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c431344777f416baaa1bcecc62c817e, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239237937 2024-12-03T15:20:41,246 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9da6204b07794f04b82330942d4159f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:41,251 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:41,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-03T15:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:41,252 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:41,258 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0fa2f5fc7b174dfc8db734746d9178f1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fa2f5fc7b174dfc8db734746d9178f1 2024-12-03T15:20:41,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3949941d2d124e70b88af806b4fed31c is 50, key is 
test_row_0/A:col10/1733239240201/Put/seqid=0 2024-12-03T15:20:41,275 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 0fa2f5fc7b174dfc8db734746d9178f1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:41,276 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:41,276 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=12, startTime=1733239241168; duration=0sec 2024-12-03T15:20:41,276 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:41,276 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:41,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741880_1056 (size=12151) 2024-12-03T15:20:41,293 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3949941d2d124e70b88af806b4fed31c 2024-12-03T15:20:41,304 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:41,305 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/44be62ecd48e4cd49e86aac63c0f2643 is 50, key is test_row_0/C:col10/1733239240123/Put/seqid=0 2024-12-03T15:20:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/9f16318608164f84b84f9a3e501669f9 is 50, key is test_row_0/B:col10/1733239240201/Put/seqid=0 2024-12-03T15:20:41,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741881_1057 (size=12595) 2024-12-03T15:20:41,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:41,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:41,345 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/44be62ecd48e4cd49e86aac63c0f2643 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/44be62ecd48e4cd49e86aac63c0f2643 2024-12-03T15:20:41,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741882_1058 (size=12151) 2024-12-03T15:20:41,362 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 44be62ecd48e4cd49e86aac63c0f2643(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:41,362 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:41,363 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=12, startTime=1733239241168; duration=0sec 2024-12-03T15:20:41,363 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:41,363 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:41,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239301347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239301351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239301363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239301363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239301363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239301466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239301467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239301473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239301475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239301475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239301671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239301679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239301679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239301688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:20:41,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239301690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:20:41,763 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/9f16318608164f84b84f9a3e501669f9
2024-12-03T15:20:41,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/265b953e000843f3a52fa907ceb29f51 is 50, key is test_row_0/C:col10/1733239240201/Put/seqid=0
2024-12-03T15:20:41,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741883_1059 (size=12151)
2024-12-03T15:20:41,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239301989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239301989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239301989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:41,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239301992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:41,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:20:41,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239301994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:20:42,217 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/265b953e000843f3a52fa907ceb29f51
2024-12-03T15:20:42,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3949941d2d124e70b88af806b4fed31c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c
2024-12-03T15:20:42,249 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c, entries=150, sequenceid=212, filesize=11.9 K
2024-12-03T15:20:42,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/9f16318608164f84b84f9a3e501669f9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9
2024-12-03T15:20:42,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-03T15:20:42,260 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9, entries=150, sequenceid=212, filesize=11.9 K
2024-12-03T15:20:42,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/265b953e000843f3a52fa907ceb29f51 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51
2024-12-03T15:20:42,273 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51, entries=150, sequenceid=212, filesize=11.9 K
2024-12-03T15:20:42,275 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 267a7e743c7c4973345ceaeae71cae1f in 1022ms, sequenceid=212, compaction requested=false
2024-12-03T15:20:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f:
2024-12-03T15:20:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.
2024-12-03T15:20:42,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21
2024-12-03T15:20:42,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=21
2024-12-03T15:20:42,279 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20
2024-12-03T15:20:42,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1530 sec
2024-12-03T15:20:42,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.1610 sec
2024-12-03T15:20:42,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f
2024-12-03T15:20:42,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C
2024-12-03T15:20:42,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:42,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e2ec41c383914caabba5eb327ba74134 is 50, key is test_row_0/A:col10/1733239242496/Put/seqid=0
2024-12-03T15:20:42,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239302524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239302524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239302525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239302525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:20:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239302527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:20:42,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741884_1060 (size=12151)
2024-12-03T15:20:42,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e2ec41c383914caabba5eb327ba74134
2024-12-03T15:20:42,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/4c8ea5085d7241cd8c99c9e858d52794 is 50, key is test_row_0/B:col10/1733239242496/Put/seqid=0
2024-12-03T15:20:42,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741885_1061 (size=12151)
2024-12-03T15:20:42,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239302631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239302631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239302632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239302633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239302633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239302836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239302836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239302837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239302837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239302840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:42,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/4c8ea5085d7241cd8c99c9e858d52794 2024-12-03T15:20:42,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1f9a89fa24404278a2e6a8ad4d20d654 is 50, key is test_row_0/C:col10/1733239242496/Put/seqid=0 2024-12-03T15:20:43,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741886_1062 (size=12151) 2024-12-03T15:20:43,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1f9a89fa24404278a2e6a8ad4d20d654 2024-12-03T15:20:43,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/e2ec41c383914caabba5eb327ba74134 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134 2024-12-03T15:20:43,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134, entries=150, sequenceid=229, filesize=11.9 K 2024-12-03T15:20:43,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/4c8ea5085d7241cd8c99c9e858d52794 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794 2024-12-03T15:20:43,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794, entries=150, sequenceid=229, filesize=11.9 K 2024-12-03T15:20:43,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1f9a89fa24404278a2e6a8ad4d20d654 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654 2024-12-03T15:20:43,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654, entries=150, sequenceid=229, filesize=11.9 K 2024-12-03T15:20:43,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 267a7e743c7c4973345ceaeae71cae1f in 595ms, sequenceid=229, compaction requested=true 2024-12-03T15:20:43,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:43,092 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:43,093 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:43,102 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:43,103 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:43,103 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:43,103 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/b632e2750d034b68aa813dbdb9f0af98, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.0 K 2024-12-03T15:20:43,110 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b632e2750d034b68aa813dbdb9f0af98, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:43,110 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:43,111 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:43,111 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:43,111 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fa2f5fc7b174dfc8db734746d9178f1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.0 K 2024-12-03T15:20:43,114 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3949941d2d124e70b88af806b4fed31c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733239240185 2024-12-03T15:20:43,115 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fa2f5fc7b174dfc8db734746d9178f1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:43,116 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2ec41c383914caabba5eb327ba74134, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:43,116 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f16318608164f84b84f9a3e501669f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733239240185 2024-12-03T15:20:43,117 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c8ea5085d7241cd8c99c9e858d52794, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:43,130 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:43,131 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0a2288dec73247cb929ebddf357fb2a0 is 50, key is test_row_0/B:col10/1733239242496/Put/seqid=0 2024-12-03T15:20:43,133 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:43,134 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8f0df04255664ad1a06f53f965258b7f is 50, key is test_row_0/A:col10/1733239242496/Put/seqid=0 2024-12-03T15:20:43,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:43,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-03T15:20:43,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:43,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:43,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:43,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:43,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:43,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:43,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239303156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239303162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239303162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239303162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239303163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741887_1063 (size=12697) 2024-12-03T15:20:43,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3537c567404c4d9aa6c0527caf568684 is 50, key is test_row_0/A:col10/1733239243144/Put/seqid=0 2024-12-03T15:20:43,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741888_1064 (size=12697) 2024-12-03T15:20:43,213 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8f0df04255664ad1a06f53f965258b7f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8f0df04255664ad1a06f53f965258b7f 2024-12-03T15:20:43,221 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 8f0df04255664ad1a06f53f965258b7f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:43,221 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:43,222 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239243092; duration=0sec 2024-12-03T15:20:43,222 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:43,222 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:43,222 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:43,224 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:43,224 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:43,224 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:43,224 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/44be62ecd48e4cd49e86aac63c0f2643, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.0 K 2024-12-03T15:20:43,225 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44be62ecd48e4cd49e86aac63c0f2643, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=188, earliestPutTs=1733239240116 2024-12-03T15:20:43,225 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 265b953e000843f3a52fa907ceb29f51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733239240185 2024-12-03T15:20:43,226 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f9a89fa24404278a2e6a8ad4d20d654, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:43,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32963 is added to blk_1073741889_1065 (size=12151) 2024-12-03T15:20:43,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3537c567404c4d9aa6c0527caf568684 2024-12-03T15:20:43,244 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:43,245 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3d14e80e32b34088b526a033a471b9cd is 50, key is test_row_0/C:col10/1733239242496/Put/seqid=0 2024-12-03T15:20:43,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c67d669ff51d415b88b208642abe0f4a is 50, key is test_row_0/B:col10/1733239243144/Put/seqid=0 2024-12-03T15:20:43,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741890_1066 (size=12697) 2024-12-03T15:20:43,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239303267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239303269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741891_1067 (size=12151) 2024-12-03T15:20:43,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239303270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239303271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239303271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c67d669ff51d415b88b208642abe0f4a 2024-12-03T15:20:43,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/5c8bef8466ab43cda1432782b4fc959e is 50, key is test_row_0/C:col10/1733239243144/Put/seqid=0 2024-12-03T15:20:43,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741892_1068 (size=12151) 2024-12-03T15:20:43,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239303471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239303476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239303476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239303478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239303479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,585 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0a2288dec73247cb929ebddf357fb2a0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0a2288dec73247cb929ebddf357fb2a0 2024-12-03T15:20:43,603 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 0a2288dec73247cb929ebddf357fb2a0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:43,603 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:43,603 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239243093; duration=0sec 2024-12-03T15:20:43,603 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:43,603 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:43,675 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3d14e80e32b34088b526a033a471b9cd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d14e80e32b34088b526a033a471b9cd 2024-12-03T15:20:43,691 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 3d14e80e32b34088b526a033a471b9cd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:43,691 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:43,691 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239243093; duration=0sec 2024-12-03T15:20:43,691 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:43,691 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:43,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/5c8bef8466ab43cda1432782b4fc959e 2024-12-03T15:20:43,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/3537c567404c4d9aa6c0527caf568684 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684 2024-12-03T15:20:43,738 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684, entries=150, sequenceid=252, filesize=11.9 K 2024-12-03T15:20:43,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c67d669ff51d415b88b208642abe0f4a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a 2024-12-03T15:20:43,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a, entries=150, sequenceid=252, filesize=11.9 K 2024-12-03T15:20:43,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/5c8bef8466ab43cda1432782b4fc959e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e 2024-12-03T15:20:43,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e, entries=150, sequenceid=252, filesize=11.9 K 2024-12-03T15:20:43,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 267a7e743c7c4973345ceaeae71cae1f in 617ms, sequenceid=252, compaction requested=false 2024-12-03T15:20:43,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:43,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:43,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-03T15:20:43,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/9186463f92fc47d0858ffd80187b1f6a is 50, key is test_row_0/A:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741893_1069 (size=14741) 2024-12-03T15:20:43,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239303820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239303825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239303825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239303829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239303831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239303933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239303933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239303935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239303938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:43,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:43,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239303939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239304136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239304137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239304138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239304143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239304142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/9186463f92fc47d0858ffd80187b1f6a 2024-12-03T15:20:44,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ea6fbfe0284e4798ae036565d5edf254 is 50, key is test_row_0/B:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:44,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-03T15:20:44,254 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-03T15:20:44,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:44,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-03T15:20:44,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-03T15:20:44,259 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:44,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741894_1070 (size=12301) 2024-12-03T15:20:44,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ea6fbfe0284e4798ae036565d5edf254 2024-12-03T15:20:44,263 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:44,263 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:44,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/79a0dd55fb824523911e11d96dbfa7f7 is 50, key is test_row_0/C:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:44,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741895_1071 (size=12301) 2024-12-03T15:20:44,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/79a0dd55fb824523911e11d96dbfa7f7 2024-12-03T15:20:44,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/9186463f92fc47d0858ffd80187b1f6a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a 2024-12-03T15:20:44,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a, entries=200, sequenceid=269, filesize=14.4 K 2024-12-03T15:20:44,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ea6fbfe0284e4798ae036565d5edf254 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254 2024-12-03T15:20:44,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254, entries=150, sequenceid=269, filesize=12.0 K 2024-12-03T15:20:44,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/79a0dd55fb824523911e11d96dbfa7f7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7 2024-12-03T15:20:44,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-03T15:20:44,404 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7, entries=150, sequenceid=269, filesize=12.0 K 2024-12-03T15:20:44,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 267a7e743c7c4973345ceaeae71cae1f in 629ms, sequenceid=269, compaction requested=true 2024-12-03T15:20:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:44,408 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:20:44,409 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:44,410 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:44,411 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:44,411 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:44,411 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0a2288dec73247cb929ebddf357fb2a0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.3 K 2024-12-03T15:20:44,411 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:44,411 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:44,411 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:44,411 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a2288dec73247cb929ebddf357fb2a0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:44,411 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8f0df04255664ad1a06f53f965258b7f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=38.7 K 2024-12-03T15:20:44,412 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f0df04255664ad1a06f53f965258b7f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:44,412 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c67d669ff51d415b88b208642abe0f4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239242525 2024-12-03T15:20:44,412 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ea6fbfe0284e4798ae036565d5edf254, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733239243159 2024-12-03T15:20:44,412 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 3537c567404c4d9aa6c0527caf568684, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239242525 2024-12-03T15:20:44,413 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9186463f92fc47d0858ffd80187b1f6a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733239243157 2024-12-03T15:20:44,417 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-03T15:20:44,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:44,418 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:44,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:44,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:44,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:44,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/bb60750fbe5b4da29cf053908ce9d6f3 is 50, key is test_row_0/A:col10/1733239243824/Put/seqid=0 2024-12-03T15:20:44,451 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:44,452 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/fbcc3a8ed3604b1487e08bcd2709e2a6 is 50, key is test_row_0/B:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:44,457 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#59 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:44,459 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8dd736271a374461a81aa670ecf9d2f9 is 50, key is test_row_0/A:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:44,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741896_1072 (size=12301) 2024-12-03T15:20:44,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239304469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239304474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741897_1073 (size=12949) 2024-12-03T15:20:44,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239304475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239304476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239304476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,494 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/fbcc3a8ed3604b1487e08bcd2709e2a6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/fbcc3a8ed3604b1487e08bcd2709e2a6 2024-12-03T15:20:44,503 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into fbcc3a8ed3604b1487e08bcd2709e2a6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:44,503 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:44,503 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239244408; duration=0sec 2024-12-03T15:20:44,503 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:44,503 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:44,503 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:44,506 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:44,506 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:44,506 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:44,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741898_1074 (size=12949) 2024-12-03T15:20:44,506 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d14e80e32b34088b526a033a471b9cd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.3 K 2024-12-03T15:20:44,507 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d14e80e32b34088b526a033a471b9cd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1733239241349 2024-12-03T15:20:44,508 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c8bef8466ab43cda1432782b4fc959e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239242525 2024-12-03T15:20:44,508 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 79a0dd55fb824523911e11d96dbfa7f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=269, earliestPutTs=1733239243159 2024-12-03T15:20:44,525 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:44,527 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/773dea7427124274ab617049bb7cc5dc is 50, key is test_row_0/C:col10/1733239243161/Put/seqid=0 2024-12-03T15:20:44,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741899_1075 (size=12949) 2024-12-03T15:20:44,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-03T15:20:44,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239304578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239304580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239304584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239304584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239304586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239304785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239304784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239304787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239304796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:44,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239304796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-03T15:20:44,876 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/bb60750fbe5b4da29cf053908ce9d6f3 2024-12-03T15:20:44,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/d3a4fa11665d4d1393b6df7a74fe29ac is 50, key is test_row_0/B:col10/1733239243824/Put/seqid=0 2024-12-03T15:20:44,941 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/8dd736271a374461a81aa670ecf9d2f9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8dd736271a374461a81aa670ecf9d2f9 2024-12-03T15:20:44,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 8dd736271a374461a81aa670ecf9d2f9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:44,955 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:44,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239244408; duration=0sec 2024-12-03T15:20:44,956 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:44,956 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:44,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741900_1076 (size=12301) 2024-12-03T15:20:44,973 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/d3a4fa11665d4d1393b6df7a74fe29ac 2024-12-03T15:20:44,986 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/773dea7427124274ab617049bb7cc5dc as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/773dea7427124274ab617049bb7cc5dc 2024-12-03T15:20:44,997 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 773dea7427124274ab617049bb7cc5dc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:44,997 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:44,997 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239244409; duration=0sec 2024-12-03T15:20:44,997 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:44,997 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:45,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/ddcf5a3936ca4b6995cdc4e634350d16 is 50, key is test_row_0/C:col10/1733239243824/Put/seqid=0 2024-12-03T15:20:45,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741901_1077 (size=12301) 2024-12-03T15:20:45,015 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/ddcf5a3936ca4b6995cdc4e634350d16 2024-12-03T15:20:45,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/bb60750fbe5b4da29cf053908ce9d6f3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3 2024-12-03T15:20:45,036 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:20:45,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/d3a4fa11665d4d1393b6df7a74fe29ac as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac 2024-12-03T15:20:45,051 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:20:45,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/ddcf5a3936ca4b6995cdc4e634350d16 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16 2024-12-03T15:20:45,069 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:20:45,070 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 267a7e743c7c4973345ceaeae71cae1f in 652ms, sequenceid=292, compaction requested=false 2024-12-03T15:20:45,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:45,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:45,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-03T15:20:45,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-03T15:20:45,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:45,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:20:45,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-03T15:20:45,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 846 msec 2024-12-03T15:20:45,129 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 860 msec 2024-12-03T15:20:45,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:45,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:45,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:45,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2971f8ae19e94ab9892c09ca4d5ee234 is 50, key is test_row_0/A:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:45,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741902_1078 (size=12301) 2024-12-03T15:20:45,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239305252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239305260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239305262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2971f8ae19e94ab9892c09ca4d5ee234 2024-12-03T15:20:45,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239305263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239305266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,300 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b053767989d14e46b071d2eda49ddeca is 50, key is test_row_0/B:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:45,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741903_1079 (size=12301) 2024-12-03T15:20:45,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-03T15:20:45,363 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-03T15:20:45,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239305366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:45,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-03T15:20:45,370 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:45,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:45,371 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:45,371 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:45,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239305374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239305373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239305378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239305379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:45,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:45,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:45,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:45,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239305571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239305577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239305584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239305595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239305598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:45,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:45,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b053767989d14e46b071d2eda49ddeca 2024-12-03T15:20:45,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/114231fb8d1545f6a43a0aab298e3fc0 is 50, key is test_row_0/C:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:45,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741904_1080 (size=12301) 2024-12-03T15:20:45,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/114231fb8d1545f6a43a0aab298e3fc0 2024-12-03T15:20:45,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2971f8ae19e94ab9892c09ca4d5ee234 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234 2024-12-03T15:20:45,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234, entries=150, sequenceid=313, filesize=12.0 K 2024-12-03T15:20:45,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b053767989d14e46b071d2eda49ddeca as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca 2024-12-03T15:20:45,833 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:45,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:45,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:45,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,856 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca, entries=150, sequenceid=313, filesize=12.0 K 2024-12-03T15:20:45,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/114231fb8d1545f6a43a0aab298e3fc0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0 2024-12-03T15:20:45,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239305874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239305883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:45,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239305889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0, entries=150, sequenceid=313, filesize=12.0 K 2024-12-03T15:20:45,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 267a7e743c7c4973345ceaeae71cae1f in 791ms, sequenceid=313, compaction requested=true 2024-12-03T15:20:45,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:45,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:45,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:45,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:45,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:20:45,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:45,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-03T15:20:45,906 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:45,906 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:45,908 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:45,908 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:45,909 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,909 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/773dea7427124274ab617049bb7cc5dc, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.7 K 2024-12-03T15:20:45,909 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:45,909 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:45,909 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:45,909 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8dd736271a374461a81aa670ecf9d2f9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.7 K 2024-12-03T15:20:45,910 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 773dea7427124274ab617049bb7cc5dc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733239243159 2024-12-03T15:20:45,910 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dd736271a374461a81aa670ecf9d2f9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733239243159 2024-12-03T15:20:45,911 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ddcf5a3936ca4b6995cdc4e634350d16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733239243819 2024-12-03T15:20:45,911 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb60750fbe5b4da29cf053908ce9d6f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733239243819 2024-12-03T15:20:45,911 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 114231fb8d1545f6a43a0aab298e3fc0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:45,912 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2971f8ae19e94ab9892c09ca4d5ee234, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:45,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:45,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 
2024-12-03T15:20:45,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:45,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/06811816bfe249379b37146c06bf1567 is 50, key is test_row_0/A:col10/1733239245252/Put/seqid=0 2024-12-03T15:20:45,932 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:45,932 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/122fcba3558e4ea8a094400fae25cf63 is 50, key is test_row_0/A:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:45,955 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#68 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:45,956 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1a5be30dbed240ba9fa0e1e36790a05f is 50, key is test_row_0/C:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:45,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741905_1081 (size=14741) 2024-12-03T15:20:45,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/06811816bfe249379b37146c06bf1567 2024-12-03T15:20:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:45,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:45,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:45,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:45,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:45,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/50a760e4b7794cd19599d21b3179db48 is 50, key is test_row_0/B:col10/1733239245252/Put/seqid=0 2024-12-03T15:20:46,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741906_1082 (size=13051) 2024-12-03T15:20:46,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741907_1083 (size=13051) 2024-12-03T15:20:46,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741908_1084 (size=12301) 2024-12-03T15:20:46,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,159 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,313 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
as already flushing 2024-12-03T15:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239306385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239306385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239306401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/50a760e4b7794cd19599d21b3179db48 2024-12-03T15:20:46,470 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:46,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:46,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:46,475 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/122fcba3558e4ea8a094400fae25cf63 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/122fcba3558e4ea8a094400fae25cf63 2024-12-03T15:20:46,479 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/1a5be30dbed240ba9fa0e1e36790a05f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1a5be30dbed240ba9fa0e1e36790a05f 2024-12-03T15:20:46,489 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 1a5be30dbed240ba9fa0e1e36790a05f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:46,489 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 122fcba3558e4ea8a094400fae25cf63(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:46,489 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239245906; duration=0sec 2024-12-03T15:20:46,489 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239245905; duration=0sec 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:46,489 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:46,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:46,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:46,491 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:46,491 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/fbcc3a8ed3604b1487e08bcd2709e2a6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.7 K 2024-12-03T15:20:46,492 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting fbcc3a8ed3604b1487e08bcd2709e2a6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1733239243159 2024-12-03T15:20:46,492 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d3a4fa11665d4d1393b6df7a74fe29ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733239243819 2024-12-03T15:20:46,493 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b053767989d14e46b071d2eda49ddeca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:46,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/67ef04eb77944548bb7637404bb0217a is 50, key is test_row_0/C:col10/1733239245252/Put/seqid=0 2024-12-03T15:20:46,504 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#71 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:46,504 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/1363272902494969a445d1f55f56e163 is 50, key is test_row_0/B:col10/1733239245102/Put/seqid=0 2024-12-03T15:20:46,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741910_1086 (size=13051) 2024-12-03T15:20:46,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741909_1085 (size=12301) 2024-12-03T15:20:46,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/67ef04eb77944548bb7637404bb0217a 2024-12-03T15:20:46,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/06811816bfe249379b37146c06bf1567 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567 2024-12-03T15:20:46,553 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/1363272902494969a445d1f55f56e163 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1363272902494969a445d1f55f56e163 2024-12-03T15:20:46,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567, entries=200, sequenceid=332, filesize=14.4 K 2024-12-03T15:20:46,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/50a760e4b7794cd19599d21b3179db48 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48 2024-12-03T15:20:46,570 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 1363272902494969a445d1f55f56e163(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:46,570 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:46,570 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239245905; duration=0sec 2024-12-03T15:20:46,571 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:46,571 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:46,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48, entries=150, sequenceid=332, filesize=12.0 K 2024-12-03T15:20:46,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/67ef04eb77944548bb7637404bb0217a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a 2024-12-03T15:20:46,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a, entries=150, sequenceid=332, filesize=12.0 K 2024-12-03T15:20:46,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 267a7e743c7c4973345ceaeae71cae1f in 663ms, sequenceid=332, compaction requested=false 2024-12-03T15:20:46,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:46,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:46,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:20:46,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:46,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:46,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:46,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:46,625 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:46,625 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:46,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:46,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c5be98809922432aaa782dbbe4ed932e is 50, key is test_row_0/A:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:46,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741911_1087 (size=14741) 2024-12-03T15:20:46,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:46,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,935 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:46,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:46,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:46,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:46,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239306966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:46,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:46,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239306968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c5be98809922432aaa782dbbe4ed932e 2024-12-03T15:20:47,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b844107d16294aeda346eba6a0310b31 is 50, key is test_row_0/B:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:47,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741912_1088 (size=12301) 2024-12-03T15:20:47,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b844107d16294aeda346eba6a0310b31 2024-12-03T15:20:47,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f9b933dd5fe342d3b68f63f79685cbe9 is 50, key is test_row_0/C:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:47,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741913_1089 (size=12301) 2024-12-03T15:20:47,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f9b933dd5fe342d3b68f63f79685cbe9 2024-12-03T15:20:47,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/c5be98809922432aaa782dbbe4ed932e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e 2024-12-03T15:20:47,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e, entries=200, sequenceid=354, filesize=14.4 K 2024-12-03T15:20:47,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b844107d16294aeda346eba6a0310b31 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31 2024-12-03T15:20:47,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31, entries=150, sequenceid=354, filesize=12.0 K 2024-12-03T15:20:47,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f9b933dd5fe342d3b68f63f79685cbe9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9 2024-12-03T15:20:47,246 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:47,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9, entries=150, sequenceid=354, filesize=12.0 K 2024-12-03T15:20:47,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 267a7e743c7c4973345ceaeae71cae1f in 633ms, sequenceid=354, compaction requested=true 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:47,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:20:47,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:47,258 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:47,260 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:47,261 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:47,261 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:47,261 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/122fcba3558e4ea8a094400fae25cf63, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=41.5 K 2024-12-03T15:20:47,261 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:47,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:47,262 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,262 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 122fcba3558e4ea8a094400fae25cf63, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:47,262 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1a5be30dbed240ba9fa0e1e36790a05f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.8 K 2024-12-03T15:20:47,263 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06811816bfe249379b37146c06bf1567, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733239245225 2024-12-03T15:20:47,263 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a5be30dbed240ba9fa0e1e36790a05f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:47,263 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5be98809922432aaa782dbbe4ed932e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:47,264 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 67ef04eb77944548bb7637404bb0217a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733239245252 2024-12-03T15:20:47,269 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f9b933dd5fe342d3b68f63f79685cbe9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:47,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:47,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:47,287 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:47,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/44602c8f0a864f9980f2a91414abc669 is 50, key is test_row_0/A:col10/1733239246642/Put/seqid=0 2024-12-03T15:20:47,288 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a8ace802445a4c97b9306a5f61fc4c33 is 50, key is test_row_0/C:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:47,313 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#77 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:47,313 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/1446da39e863477597182cc7cf8b6ea3 is 50, key is test_row_0/A:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:47,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239307328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239307328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741914_1090 (size=12301) 2024-12-03T15:20:47,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/44602c8f0a864f9980f2a91414abc669 2024-12-03T15:20:47,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741915_1091 (size=13153) 2024-12-03T15:20:47,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/8aa1cdcfb33b426a9d9eecf27d7edd42 is 50, key is test_row_0/B:col10/1733239246642/Put/seqid=0 2024-12-03T15:20:47,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741916_1092 (size=13153) 2024-12-03T15:20:47,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741917_1093 (size=12301) 2024-12-03T15:20:47,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/8aa1cdcfb33b426a9d9eecf27d7edd42 2024-12-03T15:20:47,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239307389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239307396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,405 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3d39f9b9c154419f945c4c7ccb077f1f is 50, key is test_row_0/C:col10/1733239246642/Put/seqid=0 2024-12-03T15:20:47,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239307405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:47,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239307432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239307432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741918_1094 (size=12301) 2024-12-03T15:20:47,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:47,560 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239307639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:47,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239307646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,714 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:47,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,768 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/a8ace802445a4c97b9306a5f61fc4c33 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a8ace802445a4c97b9306a5f61fc4c33 2024-12-03T15:20:47,782 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/1446da39e863477597182cc7cf8b6ea3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/1446da39e863477597182cc7cf8b6ea3 2024-12-03T15:20:47,790 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 1446da39e863477597182cc7cf8b6ea3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:47,790 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:47,790 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239247258; duration=0sec 2024-12-03T15:20:47,791 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:47,791 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:47,791 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:47,793 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:47,793 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:47,793 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:47,793 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1363272902494969a445d1f55f56e163, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.8 K 2024-12-03T15:20:47,795 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1363272902494969a445d1f55f56e163, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239245102 2024-12-03T15:20:47,796 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a760e4b7794cd19599d21b3179db48, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733239245252 2024-12-03T15:20:47,797 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b844107d16294aeda346eba6a0310b31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:47,803 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into a8ace802445a4c97b9306a5f61fc4c33(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:47,803 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:47,803 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239247258; duration=0sec 2024-12-03T15:20:47,803 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:47,803 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:47,827 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:47,842 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ab06c541c0d34653bfe6b4d4b2106ed0 is 50, key is test_row_0/B:col10/1733239245990/Put/seqid=0 2024-12-03T15:20:47,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3d39f9b9c154419f945c4c7ccb077f1f 2024-12-03T15:20:47,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:47,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:47,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:47,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:47,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:47,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:47,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/44602c8f0a864f9980f2a91414abc669 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669 2024-12-03T15:20:47,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669, entries=150, sequenceid=372, filesize=12.0 K 2024-12-03T15:20:47,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/8aa1cdcfb33b426a9d9eecf27d7edd42 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42 2024-12-03T15:20:47,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42, entries=150, sequenceid=372, filesize=12.0 K 2024-12-03T15:20:47,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/3d39f9b9c154419f945c4c7ccb077f1f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f 2024-12-03T15:20:47,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741919_1095 (size=13153) 2024-12-03T15:20:47,917 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/ab06c541c0d34653bfe6b4d4b2106ed0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ab06c541c0d34653bfe6b4d4b2106ed0 2024-12-03T15:20:47,925 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into ab06c541c0d34653bfe6b4d4b2106ed0(size=12.8 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:47,926 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:47,926 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239247258; duration=0sec 2024-12-03T15:20:47,926 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:47,926 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:47,930 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f, entries=150, sequenceid=372, filesize=12.0 K 2024-12-03T15:20:47,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 267a7e743c7c4973345ceaeae71cae1f in 655ms, sequenceid=372, compaction requested=false 2024-12-03T15:20:47,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:47,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:47,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:20:47,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:47,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:47,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:47,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:47,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/392108a3bf874c2983c6aecea77380f9 is 50, key is test_row_0/A:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:48,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741920_1096 (size=12301) 2024-12-03T15:20:48,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 
(bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/392108a3bf874c2983c6aecea77380f9 2024-12-03T15:20:48,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0fbd9577cfed45fc9e167d3b759b1236 is 50, key is test_row_0/B:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:48,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239308035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239308036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741921_1097 (size=12301) 2024-12-03T15:20:48,088 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0fbd9577cfed45fc9e167d3b759b1236 2024-12-03T15:20:48,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/674221656b35499e849d4fa703ef3a81 is 50, key is test_row_0/C:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:48,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239308139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239308140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,194 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741922_1098 (size=12301) 2024-12-03T15:20:48,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239308346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239308346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,361 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:48,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,518 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/674221656b35499e849d4fa703ef3a81 2024-12-03T15:20:48,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239308651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:48,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239308656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:48,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/392108a3bf874c2983c6aecea77380f9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9 2024-12-03T15:20:48,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9, entries=150, sequenceid=394, filesize=12.0 K 2024-12-03T15:20:48,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0fbd9577cfed45fc9e167d3b759b1236 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236 2024-12-03T15:20:48,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236, entries=150, sequenceid=394, filesize=12.0 K 2024-12-03T15:20:48,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:48,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:48,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/674221656b35499e849d4fa703ef3a81 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81 2024-12-03T15:20:48,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81, entries=150, sequenceid=394, filesize=12.0 K 2024-12-03T15:20:48,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 267a7e743c7c4973345ceaeae71cae1f in 922ms, sequenceid=394, compaction requested=true 2024-12-03T15:20:48,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:48,872 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:48,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:48,873 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:48,873 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:48,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:48,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:48,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:20:48,874 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:48,875 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:48,875 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:48,875 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,876 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/1446da39e863477597182cc7cf8b6ea3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.9 K 2024-12-03T15:20:48,879 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1446da39e863477597182cc7cf8b6ea3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:48,879 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:48,879 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:48,879 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:48,880 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ab06c541c0d34653bfe6b4d4b2106ed0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.9 K 2024-12-03T15:20:48,880 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44602c8f0a864f9980f2a91414abc669, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733239246642 2024-12-03T15:20:48,881 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ab06c541c0d34653bfe6b4d4b2106ed0, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:48,881 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 392108a3bf874c2983c6aecea77380f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:48,882 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8aa1cdcfb33b426a9d9eecf27d7edd42, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733239246642 2024-12-03T15:20:48,882 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fbd9577cfed45fc9e167d3b759b1236, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:48,952 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:48,953 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b823d4cd58c74701a279f068583d6e00 is 50, key is test_row_0/B:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:48,967 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#85 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:48,968 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/bb31232ccc584d5bb1db97867648cf9b is 50, key is test_row_0/A:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:48,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741923_1099 (size=13255) 2024-12-03T15:20:48,985 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:48,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-03T15:20:48,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:48,987 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:20:48,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:48,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:48,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:48,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:48,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:48,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:49,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/4b0e52fe6a224552b240fded1f61a923 is 50, key is test_row_0/A:col10/1733239248034/Put/seqid=0 2024-12-03T15:20:49,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741924_1100 (size=13255) 2024-12-03T15:20:49,017 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/bb31232ccc584d5bb1db97867648cf9b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb31232ccc584d5bb1db97867648cf9b 2024-12-03T15:20:49,033 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into bb31232ccc584d5bb1db97867648cf9b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:49,034 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:49,034 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239248872; duration=0sec 2024-12-03T15:20:49,034 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:49,034 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:49,034 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:49,037 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:49,037 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:49,038 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
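The entries above show the region server selecting all three store files of column family C with the ExploringCompactionPolicy and starting a minor compaction, throttled by the PressureAwareThroughputController at 50.00 MB/second. These compactions and flushes are triggered automatically by the server; for comparison, the same operations can also be requested explicitly through the client Admin API. The following is a minimal sketch under that assumption, not something taken from this test run; only the table name, TestAcidGuarantees, comes from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualFlushAndCompact {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Table name from the log above; everything else is illustrative.
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(table);        // write current memstore contents out as new store files
          admin.majorCompact(table); // asynchronously ask each store to rewrite all of its files
        }
      }
    }

Both calls return once the request has been submitted; the actual work shows up in region-server log entries like the HStore and CompactSplit lines in this section.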
2024-12-03T15:20:49,038 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a8ace802445a4c97b9306a5f61fc4c33, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=36.9 K 2024-12-03T15:20:49,038 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8ace802445a4c97b9306a5f61fc4c33, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733239245990 2024-12-03T15:20:49,039 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d39f9b9c154419f945c4c7ccb077f1f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733239246642 2024-12-03T15:20:49,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741925_1101 (size=12301) 2024-12-03T15:20:49,040 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 674221656b35499e849d4fa703ef3a81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:49,040 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/4b0e52fe6a224552b240fded1f61a923 2024-12-03T15:20:49,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 is 50, key is test_row_0/B:col10/1733239248034/Put/seqid=0 2024-12-03T15:20:49,059 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:49,060 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/eaa0dab000ec472c9aa805bf19c10096 is 50, key is test_row_0/C:col10/1733239247320/Put/seqid=0 2024-12-03T15:20:49,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741926_1102 (size=12301) 2024-12-03T15:20:49,075 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 2024-12-03T15:20:49,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741927_1103 (size=13255) 2024-12-03T15:20:49,094 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/eaa0dab000ec472c9aa805bf19c10096 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/eaa0dab000ec472c9aa805bf19c10096 2024-12-03T15:20:49,103 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into eaa0dab000ec472c9aa805bf19c10096(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
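The flush entries above (FlushRegionCallable, pid=25) are draining memstore data that concurrent writer threads keep refilling; the test tool's writers go through the ordinary Table.put path, and when the memstore outruns the flush the server answers with RegionTooBusyException and the client retries, which is what the RpcRetryingCallerImpl entries later in this section record. The sketch below is a hedged illustration of such a writer, not the test tool's actual code; the table name and column families A/B/C come from the log, while the row, qualifier, value, and the retry settings (chosen to mirror the retries=16 visible in the retry entries) are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SimpleWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs; values here are illustrative, picked to match
        // the retries=16 reported by RpcRetryingCallerImpl in this log.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms between retries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row and qualifier are illustrative; the test writes test_row_N under families A, B and C.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put); // retries internally if the region reports it is too busy
        }
      }
    }

If every retry is exhausted, the client surfaces the RegionTooBusyException to the caller, as seen in the "Call exception, tries=6, retries=16" entries that follow.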
2024-12-03T15:20:49,103 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:49,103 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239248874; duration=0sec 2024-12-03T15:20:49,104 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:49,104 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:49,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/06c03fed9a864209ac51da5f82ee9d37 is 50, key is test_row_0/C:col10/1733239248034/Put/seqid=0 2024-12-03T15:20:49,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741928_1104 (size=12301) 2024-12-03T15:20:49,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. as already flushing 2024-12-03T15:20:49,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:49,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239309215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239309214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239309324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239309324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,398 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/b823d4cd58c74701a279f068583d6e00 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b823d4cd58c74701a279f068583d6e00 2024-12-03T15:20:49,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41196 deadline: 1733239309406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,419 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:20:49,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41154 deadline: 1733239309419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41168 deadline: 1733239309419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,421 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:20:49,422 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:20:49,424 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into b823d4cd58c74701a279f068583d6e00(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:49,424 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:49,424 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239248873; duration=0sec 2024-12-03T15:20:49,424 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:49,424 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:49,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-03T15:20:49,512 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/06c03fed9a864209ac51da5f82ee9d37 2024-12-03T15:20:49,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/4b0e52fe6a224552b240fded1f61a923 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923 2024-12-03T15:20:49,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239309535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239309535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,546 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923, entries=150, sequenceid=411, filesize=12.0 K 2024-12-03T15:20:49,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 2024-12-03T15:20:49,583 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86, entries=150, sequenceid=411, filesize=12.0 K 2024-12-03T15:20:49,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/06c03fed9a864209ac51da5f82ee9d37 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37 2024-12-03T15:20:49,613 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37, entries=150, sequenceid=411, filesize=12.0 K 2024-12-03T15:20:49,614 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 267a7e743c7c4973345ceaeae71cae1f in 627ms, sequenceid=411, 
compaction requested=false 2024-12-03T15:20:49,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:49,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:49,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-03T15:20:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-03T15:20:49,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-03T15:20:49,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.2450 sec 2024-12-03T15:20:49,625 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 4.2550 sec 2024-12-03T15:20:49,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:49,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:49,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:49,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/0df20bfa43e84f848891e23e5f55da19 is 50, key is test_row_0/A:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:49,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239309926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239309920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:49,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741929_1105 (size=17181) 2024-12-03T15:20:50,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239310036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239310037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239310251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239310257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/0df20bfa43e84f848891e23e5f55da19 2024-12-03T15:20:50,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/66f567109a61484698762d2452bafa67 is 50, key is test_row_0/B:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:50,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741930_1106 (size=12301) 2024-12-03T15:20:50,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/66f567109a61484698762d2452bafa67 2024-12-03T15:20:50,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/7e4949d496df4edda7094986694fa768 is 50, key is test_row_0/C:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741931_1107 (size=12301) 2024-12-03T15:20:50,514 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/7e4949d496df4edda7094986694fa768 2024-12-03T15:20:50,544 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/0df20bfa43e84f848891e23e5f55da19 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19 2024-12-03T15:20:50,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19, entries=250, sequenceid=435, filesize=16.8 K 2024-12-03T15:20:50,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/66f567109a61484698762d2452bafa67 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67 2024-12-03T15:20:50,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41166 deadline: 1733239310569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67, entries=150, sequenceid=435, filesize=12.0 K 2024-12-03T15:20:50,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/7e4949d496df4edda7094986694fa768 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768 2024-12-03T15:20:50,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768, entries=150, sequenceid=435, filesize=12.0 K 2024-12-03T15:20:50,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 267a7e743c7c4973345ceaeae71cae1f in 736ms, sequenceid=435, compaction requested=true 2024-12-03T15:20:50,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under 
compaction store size is 3 2024-12-03T15:20:50,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-03T15:20:50,602 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:50,602 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:50,604 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:50,604 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:50,604 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:50,604 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/eaa0dab000ec472c9aa805bf19c10096, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=37.0 K 2024-12-03T15:20:50,605 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42737 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:50,605 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:50,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:50,606 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting eaa0dab000ec472c9aa805bf19c10096, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:50,606 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:50,606 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb31232ccc584d5bb1db97867648cf9b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=41.7 K 2024-12-03T15:20:50,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:50,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:50,608 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06c03fed9a864209ac51da5f82ee9d37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239248018 2024-12-03T15:20:50,608 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting bb31232ccc584d5bb1db97867648cf9b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:50,608 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e4949d496df4edda7094986694fa768, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733239249213 2024-12-03T15:20:50,615 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b0e52fe6a224552b240fded1f61a923, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239248018 2024-12-03T15:20:50,617 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0df20bfa43e84f848891e23e5f55da19, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733239249206 2024-12-03T15:20:50,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/09f17aa907fc43c9977aa9a7009b3073 is 50, 
key is test_row_0/A:col10/1733239249905/Put/seqid=0 2024-12-03T15:20:50,634 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#C#compaction#94 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:50,635 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/db0d2a70b2684f4d843326bfb9bb1721 is 50, key is test_row_0/C:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:50,673 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#95 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:50,673 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2b444bc705184acc948567177d0765d7 is 50, key is test_row_0/A:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:50,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741932_1108 (size=14741) 2024-12-03T15:20:50,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/09f17aa907fc43c9977aa9a7009b3073 2024-12-03T15:20:50,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741933_1109 (size=13357) 2024-12-03T15:20:50,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c0d5b0fea3d24cd99c94999266cc89e3 is 50, key is test_row_0/B:col10/1733239249905/Put/seqid=0 2024-12-03T15:20:50,727 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/db0d2a70b2684f4d843326bfb9bb1721 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/db0d2a70b2684f4d843326bfb9bb1721 2024-12-03T15:20:50,740 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into db0d2a70b2684f4d843326bfb9bb1721(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:50,740 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:50,740 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239250599; duration=0sec 2024-12-03T15:20:50,740 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:50,740 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C 2024-12-03T15:20:50,740 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:50,753 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:50,753 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:50,753 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:50,753 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b823d4cd58c74701a279f068583d6e00, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=37.0 K 2024-12-03T15:20:50,754 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b823d4cd58c74701a279f068583d6e00, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=394, earliestPutTs=1733239247301 2024-12-03T15:20:50,755 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c8da36d2f9c4563a4f0e3fe20cd5f86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239248018 2024-12-03T15:20:50,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741934_1110 (size=13357) 2024-12-03T15:20:50,756 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66f567109a61484698762d2452bafa67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=435, earliestPutTs=1733239249213 2024-12-03T15:20:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741935_1111 (size=12301) 2024-12-03T15:20:50,765 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/2b444bc705184acc948567177d0765d7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2b444bc705184acc948567177d0765d7 2024-12-03T15:20:50,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c0d5b0fea3d24cd99c94999266cc89e3 2024-12-03T15:20:50,774 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 2b444bc705184acc948567177d0765d7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:50,775 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:50,775 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239250599; duration=0sec 2024-12-03T15:20:50,775 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:50,775 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:50,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/77c8abfe16f94c1ab2c8eb58a3a9690c is 50, key is test_row_0/C:col10/1733239249905/Put/seqid=0 2024-12-03T15:20:50,807 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#98 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:50,807 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/18594a529ba047e1b5593fc029454b9a is 50, key is test_row_0/B:col10/1733239249854/Put/seqid=0 2024-12-03T15:20:50,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:50,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41192 deadline: 1733239310858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:50,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741936_1112 (size=12301) 2024-12-03T15:20:50,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/77c8abfe16f94c1ab2c8eb58a3a9690c 2024-12-03T15:20:50,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741937_1113 (size=13357) 2024-12-03T15:20:50,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/09f17aa907fc43c9977aa9a7009b3073 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073 2024-12-03T15:20:50,961 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073, entries=200, sequenceid=451, filesize=14.4 K 2024-12-03T15:20:50,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c0d5b0fea3d24cd99c94999266cc89e3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3 2024-12-03T15:20:50,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3, entries=150, sequenceid=451, filesize=12.0 K 2024-12-03T15:20:50,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/77c8abfe16f94c1ab2c8eb58a3a9690c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c 2024-12-03T15:20:50,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c, entries=150, sequenceid=451, filesize=12.0 K 2024-12-03T15:20:50,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 267a7e743c7c4973345ceaeae71cae1f in 380ms, sequenceid=451, compaction requested=false 2024-12-03T15:20:50,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:51,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:51,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:20:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A 2024-12-03T15:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B 2024-12-03T15:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C 2024-12-03T15:20:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-03T15:20:51,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/277c701e2a0f489685e35d50d8f55a8d is 50, key is test_row_0/A:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741938_1114 (size=14741) 2024-12-03T15:20:51,143 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:60989 2024-12-03T15:20:51,143 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,144 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:60989 2024-12-03T15:20:51,144 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,145 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:60989 2024-12-03T15:20:51,145 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,146 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:60989 2024-12-03T15:20:51,146 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,208 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:60989 2024-12-03T15:20:51,208 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,214 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:60989 2024-12-03T15:20:51,214 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:20:51,365 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/18594a529ba047e1b5593fc029454b9a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/18594a529ba047e1b5593fc029454b9a 2024-12-03T15:20:51,377 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 18594a529ba047e1b5593fc029454b9a(size=13.0 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:20:51,377 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:51,377 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239250599; duration=0sec 2024-12-03T15:20:51,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:51,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:51,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/277c701e2a0f489685e35d50d8f55a8d 2024-12-03T15:20:51,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c4d16febba24437f8562bf6bfc63bfa5 is 50, key is test_row_0/B:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741939_1115 (size=12301) 2024-12-03T15:20:51,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c4d16febba24437f8562bf6bfc63bfa5 2024-12-03T15:20:51,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f5c4bdc3bebf43e4b4862b46a6209d7e is 50, key is test_row_0/C:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741940_1116 (size=12301) 2024-12-03T15:20:51,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f5c4bdc3bebf43e4b4862b46a6209d7e 2024-12-03T15:20:51,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/277c701e2a0f489685e35d50d8f55a8d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d 2024-12-03T15:20:51,584 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d, entries=200, sequenceid=474, filesize=14.4 K 2024-12-03T15:20:51,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/c4d16febba24437f8562bf6bfc63bfa5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5 2024-12-03T15:20:51,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5, entries=150, sequenceid=474, filesize=12.0 K 2024-12-03T15:20:51,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/f5c4bdc3bebf43e4b4862b46a6209d7e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e 2024-12-03T15:20:51,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e, entries=150, sequenceid=474, filesize=12.0 K 2024-12-03T15:20:51,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=33.54 KB/34350 for 267a7e743c7c4973345ceaeae71cae1f in 595ms, sequenceid=474, compaction requested=true 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:51,599 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 267a7e743c7c4973345ceaeae71cae1f:C, priority=-2147483648, current under compaction 
store size is 3 2024-12-03T15:20:51,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:51,599 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:51,600 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:51,600 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42839 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:51,600 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/B is initiating minor compaction (all files) 2024-12-03T15:20:51,600 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/A is initiating minor compaction (all files) 2024-12-03T15:20:51,601 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/B in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:51,601 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/A in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:51,601 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/18594a529ba047e1b5593fc029454b9a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=37.1 K 2024-12-03T15:20:51,601 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2b444bc705184acc948567177d0765d7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=41.8 K 2024-12-03T15:20:51,601 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 18594a529ba047e1b5593fc029454b9a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733239249213 2024-12-03T15:20:51,601 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b444bc705184acc948567177d0765d7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733239249213 2024-12-03T15:20:51,602 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c0d5b0fea3d24cd99c94999266cc89e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239249905 2024-12-03T15:20:51,602 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09f17aa907fc43c9977aa9a7009b3073, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239249905 2024-12-03T15:20:51,602 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c4d16febba24437f8562bf6bfc63bfa5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1733239250800 2024-12-03T15:20:51,602 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 277c701e2a0f489685e35d50d8f55a8d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1733239250800 2024-12-03T15:20:51,612 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#B#compaction#102 average throughput is unlimited, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:51,613 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/797f3dbf079a403384c01a60f947ba42 is 50, key is test_row_0/B:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,614 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 267a7e743c7c4973345ceaeae71cae1f#A#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:51,615 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/88f04095f2eb4cc9adcff1fd45aece71 is 50, key is test_row_0/A:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741941_1117 (size=13459) 2024-12-03T15:20:51,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741942_1118 (size=13459) 2024-12-03T15:20:51,623 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/797f3dbf079a403384c01a60f947ba42 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/797f3dbf079a403384c01a60f947ba42 2024-12-03T15:20:51,628 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/B of 267a7e743c7c4973345ceaeae71cae1f into 797f3dbf079a403384c01a60f947ba42(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
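
Each "Completed compaction" entry such as the one above replaces three small HFiles with a single ~13.1 K file, after which the store's total size equals that one file. From a client, the overall compaction progress of a table can be observed through Admin.getCompactionState; the short sketch below is illustrative only and is not how AcidGuaranteesTestTool itself waits.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForCompactionSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Poll until the servers report no compaction in flight for the table, i.e. until
          // a "Completed compaction" entry has been logged for every queued store.
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(500);
          }
          System.out.println("No compactions pending for " + table);
        }
      }
    }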
2024-12-03T15:20:51,629 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:51,629 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/B, priority=13, startTime=1733239251599; duration=0sec 2024-12-03T15:20:51,629 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:20:51,629 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:B 2024-12-03T15:20:51,629 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:20:51,630 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:20:51,630 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 267a7e743c7c4973345ceaeae71cae1f/C is initiating minor compaction (all files) 2024-12-03T15:20:51,630 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 267a7e743c7c4973345ceaeae71cae1f/C in TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:51,631 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/db0d2a70b2684f4d843326bfb9bb1721, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp, totalSize=37.1 K 2024-12-03T15:20:51,631 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting db0d2a70b2684f4d843326bfb9bb1721, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1733239249213 2024-12-03T15:20:51,631 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 77c8abfe16f94c1ab2c8eb58a3a9690c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239249905 2024-12-03T15:20:51,632 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f5c4bdc3bebf43e4b4862b46a6209d7e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1733239250800 2024-12-03T15:20:51,640 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
267a7e743c7c4973345ceaeae71cae1f#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:20:51,640 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/4ac837a283d448a3b5d2b89fc4284e30 is 50, key is test_row_0/C:col10/1733239250834/Put/seqid=0 2024-12-03T15:20:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741943_1119 (size=13459) 2024-12-03T15:20:52,031 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/88f04095f2eb4cc9adcff1fd45aece71 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/88f04095f2eb4cc9adcff1fd45aece71 2024-12-03T15:20:52,053 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/A of 267a7e743c7c4973345ceaeae71cae1f into 88f04095f2eb4cc9adcff1fd45aece71(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:20:52,053 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:52,053 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/A, priority=13, startTime=1733239251599; duration=0sec 2024-12-03T15:20:52,053 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:20:52,053 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:A 2024-12-03T15:20:52,055 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/4ac837a283d448a3b5d2b89fc4284e30 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4ac837a283d448a3b5d2b89fc4284e30 2024-12-03T15:20:52,062 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 267a7e743c7c4973345ceaeae71cae1f/C of 267a7e743c7c4973345ceaeae71cae1f into 4ac837a283d448a3b5d2b89fc4284e30(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
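
Every flushed and compacted cell in these lines has the same shape, for example test_row_0/A:col10, B:col10 and C:col10 at one timestamp, because the test's writer threads update all three families of a row in a single Put, which HBase applies atomically per row; that atomicity is what the readers and scanners later verify. The sketch below shows only this general write pattern; the real AcidGuaranteesTestTool writers differ in detail (row selection, value generation, batching), so treat the names and the value here as placeholders.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWriteSketch {
      public static void main(String[] args) throws Exception {
        byte[] value = Bytes.toBytes("value-0"); // placeholder payload
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One Put touching all three families of the same row; HBase applies it atomically,
          // which is why the cells above share a row key and timestamp across A, B and C.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);
        }
      }
    }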
2024-12-03T15:20:52,062 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 267a7e743c7c4973345ceaeae71cae1f:
2024-12-03T15:20:52,062 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f., storeName=267a7e743c7c4973345ceaeae71cae1f/C, priority=13, startTime=1733239251599; duration=0sec
2024-12-03T15:20:52,062 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:20:52,062 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 267a7e743c7c4973345ceaeae71cae1f:C
2024-12-03T15:20:53,424 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:60989
2024-12-03T15:20:53,424 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:20:53,426 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:60989
2024-12-03T15:20:53,426 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:20:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 267a7e743c7c4973345ceaeae71cae1f
2024-12-03T15:20:53,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 267a7e743c7c4973345ceaeae71cae1f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-03T15:20:53,428 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:60989
2024-12-03T15:20:53,428 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=A
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=B
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 267a7e743c7c4973345ceaeae71cae1f, store=C
2024-12-03T15:20:53,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:20:53,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/5668f30de4fa4e35a3bb38047f1fdd2c is 50, key is test_row_0/A:col10/1733239251146/Put/seqid=0
2024-12-03T15:20:53,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741944_1120 (size=12301)
2024-12-03T15:20:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-12-03T15:20:53,483 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4304
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4172
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-03T15:20:53,483 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1849
2024-12-03T15:20:53,484 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5537 rows
2024-12-03T15:20:53,484 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1803
2024-12-03T15:20:53,484 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5402 rows
2024-12-03T15:20:53,484 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T15:20:53,484 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:60989
2024-12-03T15:20:53,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:20:53,487 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-03T15:20:53,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-03T15:20:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-03T15:20:53,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239253503"}]},"ts":"1733239253503"}
2024-12-03T15:20:53,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-12-03T15:20:53,505 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-03T15:20:53,508 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-03T15:20:53,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-03T15:20:53,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, UNASSIGN}]
2024-12-03T15:20:53,515 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, UNASSIGN
2024-12-03T15:20:53,516 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=267a7e743c7c4973345ceaeae71cae1f, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:20:53,518 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-03T15:20:53,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; CloseRegionProcedure 267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292}]
2024-12-03T15:20:53,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26
2024-12-03T15:20:53,674 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:20:53,676 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(124): Close 267a7e743c7c4973345ceaeae71cae1f
2024-12-03T15:20:53,676 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-03T15:20:53,677 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1681): Closing 267a7e743c7c4973345ceaeae71cae1f, disabling compactions & flushes
2024-12-03T15:20:53,677 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.
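
The DisableTableProcedure (pid=26) and its CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children above are what the master runs when a client simply disables the table. A hedged client-side equivalent is sketched below; the optional deleteTable step is common test teardown but does not appear in this log, so it is left commented out, and the class name is illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Client call behind the DisableTableProcedure (pid=26) above; the master
          // unassigns every region of the table before marking it DISABLED.
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);
          }
          // A teardown would typically drop the table afterwards (not shown in this log):
          // admin.deleteTable(table);
        }
      }
    }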
2024-12-03T15:20:53,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-03T15:20:53,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/5668f30de4fa4e35a3bb38047f1fdd2c 2024-12-03T15:20:53,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/1e63ce1dd00841998135fb3a090c4d3b is 50, key is test_row_0/B:col10/1733239251146/Put/seqid=0 2024-12-03T15:20:53,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741945_1121 (size=12301) 2024-12-03T15:20:54,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-03T15:20:54,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/1e63ce1dd00841998135fb3a090c4d3b 2024-12-03T15:20:54,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/b754439c2c654476b75a701c6344a9f8 is 50, key is test_row_0/C:col10/1733239251146/Put/seqid=0 2024-12-03T15:20:54,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741946_1122 (size=12301) 2024-12-03T15:20:54,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-03T15:20:54,682 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
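
The DefaultStoreFlusher entries here are the region's last memstore flush writing one ~17.89 KB HFile per family into the stores' .tmp directories so that the close can proceed. A table flush issued from a client (such as the one reported completed for procId 24 a few lines above) ultimately funnels into this same flush path; the sketch below shows that client call only, with nothing beyond the table name taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Request a flush of every memstore of the table; each store writes a new HFile
          // under its .tmp directory and then commits it, as the surrounding lines show.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }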
2024-12-03T15:20:54,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/b754439c2c654476b75a701c6344a9f8 2024-12-03T15:20:54,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/A/5668f30de4fa4e35a3bb38047f1fdd2c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/5668f30de4fa4e35a3bb38047f1fdd2c 2024-12-03T15:20:54,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/5668f30de4fa4e35a3bb38047f1fdd2c, entries=150, sequenceid=489, filesize=12.0 K 2024-12-03T15:20:54,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/B/1e63ce1dd00841998135fb3a090c4d3b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1e63ce1dd00841998135fb3a090c4d3b 2024-12-03T15:20:54,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1e63ce1dd00841998135fb3a090c4d3b, entries=150, sequenceid=489, filesize=12.0 K 2024-12-03T15:20:54,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/.tmp/C/b754439c2c654476b75a701c6344a9f8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/b754439c2c654476b75a701c6344a9f8 2024-12-03T15:20:54,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/b754439c2c654476b75a701c6344a9f8, entries=150, sequenceid=489, filesize=12.0 K 2024-12-03T15:20:54,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 267a7e743c7c4973345ceaeae71cae1f in 1279ms, sequenceid=489, compaction requested=false 2024-12-03T15:20:54,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:54,706 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
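
What follows is the StoreCloser archiving every obsolete store file: each file is moved from the region's data directory to the parallel location under archive/, preserving namespace, table, region and family. The sketch below reproduces only that path mapping for illustration; it is not the HFileArchiver implementation, and the root directory and file name are copied from the log while the method and class names are made up for the example.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Illustrative only: mirrors the data/ -> archive/data/ layout visible in the
      // StoreCloser entries below; it does not call or reimplement HFileArchiver.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        // storeFile is expected to look like <root>/data/<ns>/<table>/<region>/<family>/<file>
        String relative = storeFile.toString().substring(rootDir.toString().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411");
        Path storeFile = new Path(root,
            "data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6");
        // Prints the .../archive/data/default/TestAcidGuarantees/.../A/... destination that
        // matches the first "Archived from FileableStoreFile" entry below.
        System.out.println(toArchivePath(root, storeFile));
      }
    }

Files moved under archive/ are typically reclaimed later by the HFile cleaner chores unless something such as a snapshot still references them.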
2024-12-03T15:20:54,706 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:54,707 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. after waiting 0 ms 2024-12-03T15:20:54,707 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 2024-12-03T15:20:54,707 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8ee0aa2ffb6543aba188fb648c52035e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2ad0c6f83df446f0b256eeed7de2f41a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/fb9b991d09854e428d0b2ab43e995c94, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/b632e2750d034b68aa813dbdb9f0af98, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8f0df04255664ad1a06f53f965258b7f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8dd736271a374461a81aa670ecf9d2f9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/122fcba3558e4ea8a094400fae25cf63, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/1446da39e863477597182cc7cf8b6ea3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb31232ccc584d5bb1db97867648cf9b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2b444bc705184acc948567177d0765d7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d] to archive 2024-12-03T15:20:54,711 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T15:20:54,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c4101d21d5f943e0bfafab6200e58ee6 2024-12-03T15:20:54,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e98fcb4198d444a29c361defc3edd987 2024-12-03T15:20:54,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/49d2d662a3f94a4891e5f268c13787f0 2024-12-03T15:20:54,722 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8ee0aa2ffb6543aba188fb648c52035e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8ee0aa2ffb6543aba188fb648c52035e 2024-12-03T15:20:54,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/58a1b04ed4ea4ff6a571ec53c477ab1b 2024-12-03T15:20:54,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8b8a9e58ca1d4b89a1a5e07ae6d440ab 2024-12-03T15:20:54,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2ad0c6f83df446f0b256eeed7de2f41a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2ad0c6f83df446f0b256eeed7de2f41a 2024-12-03T15:20:54,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/cd4ff8bc00ee487e836e28f64c6084e2 2024-12-03T15:20:54,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/fb9b991d09854e428d0b2ab43e995c94 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/fb9b991d09854e428d0b2ab43e995c94 2024-12-03T15:20:54,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/90cbde9c669d4988818b3d58b78f6760 2024-12-03T15:20:54,732 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/7b37b8a1f07543e7a339ad4e313200ea 2024-12-03T15:20:54,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/225299190056470bb36156a7cf264b75 2024-12-03T15:20:54,735 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/b632e2750d034b68aa813dbdb9f0af98 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/b632e2750d034b68aa813dbdb9f0af98 2024-12-03T15:20:54,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/080d351955b742f69eae3a4efbdb8556 2024-12-03T15:20:54,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3949941d2d124e70b88af806b4fed31c 2024-12-03T15:20:54,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8f0df04255664ad1a06f53f965258b7f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8f0df04255664ad1a06f53f965258b7f 2024-12-03T15:20:54,740 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/e2ec41c383914caabba5eb327ba74134 2024-12-03T15:20:54,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/3537c567404c4d9aa6c0527caf568684 2024-12-03T15:20:54,744 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/9186463f92fc47d0858ffd80187b1f6a 2024-12-03T15:20:54,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8dd736271a374461a81aa670ecf9d2f9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/8dd736271a374461a81aa670ecf9d2f9 2024-12-03T15:20:54,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb60750fbe5b4da29cf053908ce9d6f3 2024-12-03T15:20:54,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/122fcba3558e4ea8a094400fae25cf63 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/122fcba3558e4ea8a094400fae25cf63 2024-12-03T15:20:54,751 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2971f8ae19e94ab9892c09ca4d5ee234 2024-12-03T15:20:54,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/06811816bfe249379b37146c06bf1567 2024-12-03T15:20:54,755 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/c5be98809922432aaa782dbbe4ed932e 2024-12-03T15:20:54,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/1446da39e863477597182cc7cf8b6ea3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/1446da39e863477597182cc7cf8b6ea3 2024-12-03T15:20:54,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/44602c8f0a864f9980f2a91414abc669 2024-12-03T15:20:54,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb31232ccc584d5bb1db97867648cf9b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/bb31232ccc584d5bb1db97867648cf9b 2024-12-03T15:20:54,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/392108a3bf874c2983c6aecea77380f9 2024-12-03T15:20:54,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/4b0e52fe6a224552b240fded1f61a923 2024-12-03T15:20:54,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/0df20bfa43e84f848891e23e5f55da19 2024-12-03T15:20:54,766 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2b444bc705184acc948567177d0765d7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/2b444bc705184acc948567177d0765d7 2024-12-03T15:20:54,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/09f17aa907fc43c9977aa9a7009b3073 2024-12-03T15:20:54,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/277c701e2a0f489685e35d50d8f55a8d 2024-12-03T15:20:54,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/24a366f2ea334141b243fe264614e7d5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/e16b7a1fb3434d0f9b5d03270cb8e27c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9279ea2d9f5a41f0b0f17e350832e8dd, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fa2f5fc7b174dfc8db734746d9178f1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0a2288dec73247cb929ebddf357fb2a0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/fbcc3a8ed3604b1487e08bcd2709e2a6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1363272902494969a445d1f55f56e163, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ab06c541c0d34653bfe6b4d4b2106ed0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b823d4cd58c74701a279f068583d6e00, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/18594a529ba047e1b5593fc029454b9a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5] to archive 2024-12-03T15:20:54,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T15:20:54,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b7821a6a4cd44a029917cec60abc90e0 2024-12-03T15:20:54,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ad67661e41534bbc883c193d9380c877 2024-12-03T15:20:54,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/24a366f2ea334141b243fe264614e7d5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/24a366f2ea334141b243fe264614e7d5 2024-12-03T15:20:54,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/bc338f05a2a740b598d9af5a0e2f31f6 2024-12-03T15:20:54,797 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/131759d83bc146738c13d55a653d9af1 2024-12-03T15:20:54,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/e16b7a1fb3434d0f9b5d03270cb8e27c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/e16b7a1fb3434d0f9b5d03270cb8e27c 2024-12-03T15:20:54,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ba26e9db5404449396de768cd80f24d3 2024-12-03T15:20:54,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/94b9dba8d2cf48a8b1248bedf3eaf2ec 2024-12-03T15:20:54,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9279ea2d9f5a41f0b0f17e350832e8dd to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9279ea2d9f5a41f0b0f17e350832e8dd 2024-12-03T15:20:54,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/f7c1cfc8d7c74795841a4daeebaea929 2024-12-03T15:20:54,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b8d4d3d815974629b77fb0bbecb3a3d7 2024-12-03T15:20:54,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/6fda85e561e047cb9a5da5a5387ec851 2024-12-03T15:20:54,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fa2f5fc7b174dfc8db734746d9178f1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fa2f5fc7b174dfc8db734746d9178f1 2024-12-03T15:20:54,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/cf97da9887d84d7296bfb3871b662fa9 2024-12-03T15:20:54,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/9f16318608164f84b84f9a3e501669f9 2024-12-03T15:20:54,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0a2288dec73247cb929ebddf357fb2a0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0a2288dec73247cb929ebddf357fb2a0 2024-12-03T15:20:54,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/4c8ea5085d7241cd8c99c9e858d52794 2024-12-03T15:20:54,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c67d669ff51d415b88b208642abe0f4a 2024-12-03T15:20:54,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/fbcc3a8ed3604b1487e08bcd2709e2a6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/fbcc3a8ed3604b1487e08bcd2709e2a6 2024-12-03T15:20:54,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ea6fbfe0284e4798ae036565d5edf254 2024-12-03T15:20:54,848 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/d3a4fa11665d4d1393b6df7a74fe29ac 2024-12-03T15:20:54,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1363272902494969a445d1f55f56e163 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1363272902494969a445d1f55f56e163 2024-12-03T15:20:54,851 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b053767989d14e46b071d2eda49ddeca 2024-12-03T15:20:54,852 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/50a760e4b7794cd19599d21b3179db48 2024-12-03T15:20:54,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ab06c541c0d34653bfe6b4d4b2106ed0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/ab06c541c0d34653bfe6b4d4b2106ed0 2024-12-03T15:20:54,855 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b844107d16294aeda346eba6a0310b31 2024-12-03T15:20:54,856 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/8aa1cdcfb33b426a9d9eecf27d7edd42 2024-12-03T15:20:54,858 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b823d4cd58c74701a279f068583d6e00 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/b823d4cd58c74701a279f068583d6e00 2024-12-03T15:20:54,859 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0fbd9577cfed45fc9e167d3b759b1236 2024-12-03T15:20:54,860 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/0c8da36d2f9c4563a4f0e3fe20cd5f86 2024-12-03T15:20:54,861 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/18594a529ba047e1b5593fc029454b9a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/18594a529ba047e1b5593fc029454b9a 2024-12-03T15:20:54,863 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/66f567109a61484698762d2452bafa67 2024-12-03T15:20:54,864 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c0d5b0fea3d24cd99c94999266cc89e3 2024-12-03T15:20:54,866 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/c4d16febba24437f8562bf6bfc63bfa5 2024-12-03T15:20:54,870 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5a82592f42f44320a1147458cba93af4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4b9c927e04e647e386693bde58c65a1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9967f54d85c84d60ac5723c758a31d1a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/44be62ecd48e4cd49e86aac63c0f2643, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d14e80e32b34088b526a033a471b9cd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/773dea7427124274ab617049bb7cc5dc, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1a5be30dbed240ba9fa0e1e36790a05f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a8ace802445a4c97b9306a5f61fc4c33, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/eaa0dab000ec472c9aa805bf19c10096, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/db0d2a70b2684f4d843326bfb9bb1721, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e] to archive 2024-12-03T15:20:54,871 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:20:54,873 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/69ba7f67583f404f82736c01dadcb4d6 2024-12-03T15:20:54,875 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/931a88467b9147678a223b2f3720cb31 2024-12-03T15:20:54,877 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5a82592f42f44320a1147458cba93af4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5a82592f42f44320a1147458cba93af4 2024-12-03T15:20:54,879 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a9169c751e714070a17a2789a0ea8048 2024-12-03T15:20:54,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a50ee0a0ddf4486dbc9ade7100a94e4b 2024-12-03T15:20:54,882 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4b9c927e04e647e386693bde58c65a1f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4b9c927e04e647e386693bde58c65a1f 2024-12-03T15:20:54,884 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3ce0242b6c5d4eb0bafd6620ff86d605 2024-12-03T15:20:54,885 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1937783b4a1e4de7b4b76f45013e6cf6 2024-12-03T15:20:54,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9967f54d85c84d60ac5723c758a31d1a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9967f54d85c84d60ac5723c758a31d1a 2024-12-03T15:20:54,889 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/8fcf87a0493449cc84b51fb301c69b7b 2024-12-03T15:20:54,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a822c0b37cc24849b60785fd8f4b9383 2024-12-03T15:20:54,892 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/6c431344777f416baaa1bcecc62c817e 2024-12-03T15:20:54,893 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/44be62ecd48e4cd49e86aac63c0f2643 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/44be62ecd48e4cd49e86aac63c0f2643 2024-12-03T15:20:54,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/9da6204b07794f04b82330942d4159f7 2024-12-03T15:20:54,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/265b953e000843f3a52fa907ceb29f51 2024-12-03T15:20:54,897 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d14e80e32b34088b526a033a471b9cd to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d14e80e32b34088b526a033a471b9cd 2024-12-03T15:20:54,899 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1f9a89fa24404278a2e6a8ad4d20d654 2024-12-03T15:20:54,900 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/5c8bef8466ab43cda1432782b4fc959e 2024-12-03T15:20:54,901 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/773dea7427124274ab617049bb7cc5dc to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/773dea7427124274ab617049bb7cc5dc 2024-12-03T15:20:54,903 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/79a0dd55fb824523911e11d96dbfa7f7 2024-12-03T15:20:54,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/ddcf5a3936ca4b6995cdc4e634350d16 2024-12-03T15:20:54,905 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1a5be30dbed240ba9fa0e1e36790a05f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/1a5be30dbed240ba9fa0e1e36790a05f 2024-12-03T15:20:54,907 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/114231fb8d1545f6a43a0aab298e3fc0 2024-12-03T15:20:54,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/67ef04eb77944548bb7637404bb0217a 2024-12-03T15:20:54,910 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a8ace802445a4c97b9306a5f61fc4c33 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/a8ace802445a4c97b9306a5f61fc4c33 2024-12-03T15:20:54,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f9b933dd5fe342d3b68f63f79685cbe9 2024-12-03T15:20:54,912 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/3d39f9b9c154419f945c4c7ccb077f1f 2024-12-03T15:20:54,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/eaa0dab000ec472c9aa805bf19c10096 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/eaa0dab000ec472c9aa805bf19c10096 2024-12-03T15:20:54,914 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/674221656b35499e849d4fa703ef3a81 2024-12-03T15:20:54,916 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/06c03fed9a864209ac51da5f82ee9d37 2024-12-03T15:20:54,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/db0d2a70b2684f4d843326bfb9bb1721 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/db0d2a70b2684f4d843326bfb9bb1721 2024-12-03T15:20:54,918 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/7e4949d496df4edda7094986694fa768 2024-12-03T15:20:54,919 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/77c8abfe16f94c1ab2c8eb58a3a9690c 2024-12-03T15:20:54,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/f5c4bdc3bebf43e4b4862b46a6209d7e 2024-12-03T15:20:54,931 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/recovered.edits/492.seqid, newMaxSeqId=492, maxSeqId=1 2024-12-03T15:20:54,935 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f. 
2024-12-03T15:20:54,935 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1635): Region close journal for 267a7e743c7c4973345ceaeae71cae1f: 2024-12-03T15:20:54,938 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(170): Closed 267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:54,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=267a7e743c7c4973345ceaeae71cae1f, regionState=CLOSED 2024-12-03T15:20:54,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-03T15:20:54,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseRegionProcedure 267a7e743c7c4973345ceaeae71cae1f, server=2b5ef621a0dd,46815,1733239226292 in 1.4220 sec 2024-12-03T15:20:54,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=27 2024-12-03T15:20:54,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=27, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=267a7e743c7c4973345ceaeae71cae1f, UNASSIGN in 1.4270 sec 2024-12-03T15:20:54,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-03T15:20:54,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4350 sec 2024-12-03T15:20:54,946 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239254946"}]},"ts":"1733239254946"} 2024-12-03T15:20:54,948 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-03T15:20:54,950 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:20:54,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4550 sec 2024-12-03T15:20:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-03T15:20:55,608 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-03T15:20:55,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:20:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,617 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,618 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=30, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,618 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-03T15:20:55,621 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:55,625 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/recovered.edits] 2024-12-03T15:20:55,628 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/5668f30de4fa4e35a3bb38047f1fdd2c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/5668f30de4fa4e35a3bb38047f1fdd2c 2024-12-03T15:20:55,629 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/88f04095f2eb4cc9adcff1fd45aece71 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/A/88f04095f2eb4cc9adcff1fd45aece71 2024-12-03T15:20:55,634 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1e63ce1dd00841998135fb3a090c4d3b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/1e63ce1dd00841998135fb3a090c4d3b 2024-12-03T15:20:55,635 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/797f3dbf079a403384c01a60f947ba42 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/B/797f3dbf079a403384c01a60f947ba42 2024-12-03T15:20:55,638 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4ac837a283d448a3b5d2b89fc4284e30 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/4ac837a283d448a3b5d2b89fc4284e30 
2024-12-03T15:20:55,639 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/b754439c2c654476b75a701c6344a9f8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/C/b754439c2c654476b75a701c6344a9f8 2024-12-03T15:20:55,642 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/recovered.edits/492.seqid to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f/recovered.edits/492.seqid 2024-12-03T15:20:55,643 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/267a7e743c7c4973345ceaeae71cae1f 2024-12-03T15:20:55,643 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:20:55,648 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=30, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-03T15:20:55,656 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:20:55,703 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:20:55,705 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=30, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,705 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-03T15:20:55,706 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239255705"}]},"ts":"9223372036854775807"} 2024-12-03T15:20:55,718 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:20:55,718 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 267a7e743c7c4973345ceaeae71cae1f, NAME => 'TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:20:55,718 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-03T15:20:55,719 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239255718"}]},"ts":"9223372036854775807"} 2024-12-03T15:20:55,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-03T15:20:55,722 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:20:55,725 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=30, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,726 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 113 msec 2024-12-03T15:20:55,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-03T15:20:55,922 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-03T15:20:55,941 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=241 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1915712582_22 at /127.0.0.1:57254 [Waiting for operation #269] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1915712582_22 at /127.0.0.1:38048 [Waiting for operation #254] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;2b5ef621a0dd:46815-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1502572181_22 at /127.0.0.1:52320 [Waiting for operation #43] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1502572181_22 at /127.0.0.1:51926 [Waiting for operation #127] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=463 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=798 (was 474) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1086 (was 3002) 2024-12-03T15:20:55,952 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=798, ProcessCount=11, AvailableMemoryMB=1084 2024-12-03T15:20:55,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-03T15:20:55,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:20:55,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:55,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:20:55,956 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
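[Editor's sketch] The create-table request logged just above spells out the schema this test asks for: table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three column families A, B and C, each keeping one version. The following is a minimal, hypothetical client-side sketch of an equivalent request using the HBase 2.x Admin API; the schema values are copied from the log line, while the class name, connection setup and variable names are illustrative assumptions and not part of the test code.

// Hypothetical client-side equivalent of the logged create-table request
// (schema values taken from the log; connection details are illustrative).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the log: adaptive in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)     // VERSIONS => '1'
                .setBlocksize(65536)   // BLOCKSIZE => '65536'
                .build());
      }
      admin.createTable(table.build());
    }
  }
}

On the server side such a request is what produces the CreateTableProcedure (pid=31) and its CREATE_TABLE_* states traced in the entries that follow.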
2024-12-03T15:20:55,956 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 31 2024-12-03T15:20:55,957 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:20:55,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:55,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741947_1123 (size=963) 2024-12-03T15:20:56,053 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:20:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:56,366 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:20:56,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741948_1124 (size=53) 2024-12-03T15:20:56,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:56,773 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:56,773 DEBUG 
[RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9cb1ca4e5b5289fcc2a0bafc5801cb91, disabling compactions & flushes 2024-12-03T15:20:56,773 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:56,773 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:56,773 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. after waiting 0 ms 2024-12-03T15:20:56,773 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:56,773 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:56,773 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:20:56,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:20:56,775 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239256775"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239256775"}]},"ts":"1733239256775"} 2024-12-03T15:20:56,776 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-03T15:20:56,777 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:20:56,778 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239256777"}]},"ts":"1733239256777"} 2024-12-03T15:20:56,779 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:20:56,783 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, ASSIGN}] 2024-12-03T15:20:56,784 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, ASSIGN 2024-12-03T15:20:56,785 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:20:56,935 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:56,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:57,089 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:57,092 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:20:57,093 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:57,093 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,093 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:57,093 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,094 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,095 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,097 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:57,097 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName A 2024-12-03T15:20:57,097 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:57,098 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:57,098 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,099 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:57,100 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName B 2024-12-03T15:20:57,100 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:57,101 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:57,101 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,102 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:57,103 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName C 2024-12-03T15:20:57,103 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:57,103 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:57,103 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:57,104 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,104 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,106 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:20:57,107 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:57,110 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:20:57,110 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 9cb1ca4e5b5289fcc2a0bafc5801cb91; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66799239, jitterRate=-0.004613772034645081}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:20:57,111 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:20:57,112 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., pid=33, masterSystemTime=1733239257089 2024-12-03T15:20:57,114 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:57,114 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:20:57,115 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:57,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-03T15:20:57,118 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 in 179 msec 2024-12-03T15:20:57,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-03T15:20:57,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, ASSIGN in 335 msec 2024-12-03T15:20:57,121 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:20:57,121 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239257121"}]},"ts":"1733239257121"} 2024-12-03T15:20:57,122 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:20:57,126 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:20:57,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1720 sec 2024-12-03T15:20:58,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-03T15:20:58,062 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 31 completed 2024-12-03T15:20:58,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d29de25 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a378df6 2024-12-03T15:20:58,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cca453a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:58,069 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:58,071 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:58,073 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:20:58,074 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36534, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:20:58,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-03T15:20:58,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:20:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-03T15:20:58,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741949_1125 (size=999) 2024-12-03T15:20:58,499 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-03T15:20:58,499 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-03T15:20:58,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:20:58,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, REOPEN/MOVE}] 2024-12-03T15:20:58,512 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, REOPEN/MOVE 2024-12-03T15:20:58,513 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:58,514 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:20:58,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:58,666 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:58,667 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,667 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:20:58,668 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 9cb1ca4e5b5289fcc2a0bafc5801cb91, disabling compactions & flushes 2024-12-03T15:20:58,668 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:58,668 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:58,668 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. after waiting 0 ms 2024-12-03T15:20:58,668 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:20:58,672 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-03T15:20:58,673 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:58,673 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:20:58,673 WARN [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionServer(3786): Not adding moved region record: 9cb1ca4e5b5289fcc2a0bafc5801cb91 to self. 2024-12-03T15:20:58,675 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,676 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=CLOSED 2024-12-03T15:20:58,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-03T15:20:58,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 in 163 msec 2024-12-03T15:20:58,679 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, REOPEN/MOVE; state=CLOSED, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=true 2024-12-03T15:20:58,830 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:58,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=36, state=RUNNABLE; OpenRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:20:58,985 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:58,988 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
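Note: the close/reopen of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 shown above is why the table modification only completes after every region has cycled, and why the client keeps asking the master "Checking to see if procedure is done pid=34" later in the log. A hedged sketch of the asynchronous form of the same call, assuming the admin handle and modified descriptor from the previous sketch; the timeout is illustrative.

```java
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class ModifyAndWait {
  private ModifyAndWait() {}

  // Submit the modification without blocking, then wait for the master-side
  // procedure (ModifyTableProcedure plus the region reopens) to finish.
  static void modifyAndWait(Admin admin, TableDescriptor modified) throws Exception {
    Future<Void> procedure = admin.modifyTableAsync(modified);
    // The blocking get() corresponds to the repeated "Checking to see if procedure
    // is done pid=NN" polling visible in the master log.
    procedure.get(5, TimeUnit.MINUTES);
  }
}
```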
2024-12-03T15:20:58,988 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7285): Opening region: {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:20:58,989 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,989 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:20:58,989 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7327): checking encryption for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,989 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7330): checking classloading for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,994 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:58,995 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:59,001 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName A 2024-12-03T15:20:59,005 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:59,006 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:59,007 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,008 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:59,008 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName B 2024-12-03T15:20:59,008 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:59,009 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:59,009 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,012 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:20:59,012 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cb1ca4e5b5289fcc2a0bafc5801cb91 columnFamilyName C 2024-12-03T15:20:59,012 DEBUG [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:59,014 INFO [StoreOpener-9cb1ca4e5b5289fcc2a0bafc5801cb91-1 {}] regionserver.HStore(327): Store=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:20:59,014 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,015 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,016 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,018 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:20:59,032 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1085): writing seq id for 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,033 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1102): Opened 9cb1ca4e5b5289fcc2a0bafc5801cb91; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70885204, jitterRate=0.05627185106277466}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:20:59,034 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1001): Region open journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:20:59,038 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., pid=38, masterSystemTime=1733239258985 2024-12-03T15:20:59,041 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,041 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
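Note: the store-opener lines above show each family backed by a CompactingMemStore with an ADAPTIVE compactor and a 2.00 MB in-memory flush threshold, on top of the deliberately tiny table-level MEMSTORE_FLUSHSIZE of 131072 bytes that TableDescriptorChecker warned about earlier. The sketch below shows one assumed way to express those two knobs on the descriptors; the log actually carries the compaction type as table-level metadata ('hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'), and the family-level setter used here is an equivalent route, not the test's own setup code.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreTuningSketch {
  public static void main(String[] args) {
    // Per-family: ADAPTIVE in-memory compaction, matching compactor=ADAPTIVE above.
    ColumnFamilyDescriptorBuilder family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE);

    // Per-table: a 128 KB flush size (131072 bytes), which is what triggers the
    // "MEMSTORE_FLUSHSIZE ... is too small" warning and the very frequent flushing.
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(131072L)
        .setColumnFamily(family.build());

    System.out.println(table.build());
  }
}
```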
2024-12-03T15:20:59,041 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=OPEN, openSeqNum=5, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=36 2024-12-03T15:20:59,046 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=36, state=SUCCESS; OpenRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 in 212 msec 2024-12-03T15:20:59,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-03T15:20:59,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, REOPEN/MOVE in 535 msec 2024-12-03T15:20:59,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-03T15:20:59,056 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 550 msec 2024-12-03T15:20:59,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 977 msec 2024-12-03T15:20:59,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-03T15:20:59,070 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-12-03T15:20:59,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,103 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x491ea2ee to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5b55744e 2024-12-03T15:20:59,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,111 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-12-03T15:20:59,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f64590, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,120 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 
127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-12-03T15:20:59,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4c53ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,128 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2885d2d9 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@cb464a 2024-12-03T15:20:59,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-12-03T15:20:59,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,153 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-03T15:20:59,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,159 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-03T15:20:59,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,163 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-03T15:20:59,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:20:59,177 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:20:59,178 DEBUG [hconnection-0x3b5a53c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,182 DEBUG [hconnection-0x1bf01718-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-12-03T15:20:59,184 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57138, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:20:59,190 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:20:59,190 DEBUG [hconnection-0x7b62f6ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,192 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,192 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,194 DEBUG [hconnection-0x21b61ef9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,194 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:20:59,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:20:59,196 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57170, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,198 DEBUG [hconnection-0x10658da6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,199 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,204 DEBUG [hconnection-0x7ff3aacc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,204 DEBUG [hconnection-0x5bd94073-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-03T15:20:59,206 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,206 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:20:59,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:20:59,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:20:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:20:59,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,240 DEBUG [hconnection-0x1cce6155-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,242 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,246 DEBUG [hconnection-0x6ddea86a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:20:59,247 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:20:59,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239319264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,273 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239319265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239319267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239319268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239319270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:20:59,322 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030117e89aeffe4f059d965ea5043247d5_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239259221/Put/seqid=0 2024-12-03T15:20:59,353 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-03T15:20:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:20:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
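Note: the RegionTooBusyException entries above report "Over memstore limit=512.0 K". With the table's MEMSTORE_FLUSHSIZE of 131072 bytes and the default hbase.hregion.memstore.block.multiplier of 4, the blocking threshold is 131072 x 4 = 524288 bytes = 512 KB, so writers are throttled almost immediately while the flush is still running. The following is an illustrative, hedged client-side sketch of retrying a put when the region reports it is too busy; in practice the stock HBase client retries this exception internally, and the attempt count and backoff values here are assumptions.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BusyRegionRetry {
  private BusyRegionRetry() {}

  // Retry a single put a few times when the region is throttling writes because its
  // memstore is over the blocking limit (512 KB in the log above).
  static void putWithRetry(Table table, Put put) throws IOException, InterruptedException {
    int attempts = 0;
    while (true) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        if (++attempts >= 5) {
          throw busy;
        }
        // Simple linear backoff; real clients use a configurable pause schedule.
        Thread.sleep(200L * attempts);
      }
    }
  }
}
```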
2024-12-03T15:20:59,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741950_1126 (size=12154) 2024-12-03T15:20:59,378 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:20:59,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239319376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239319377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239319378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239319378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239319383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,402 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030117e89aeffe4f059d965ea5043247d5_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030117e89aeffe4f059d965ea5043247d5_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,404 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/433a74e1f2e54ffcbb0e51a0fc94dd1d, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:20:59,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/433a74e1f2e54ffcbb0e51a0fc94dd1d is 175, key is test_row_0/A:col10/1733239259221/Put/seqid=0 2024-12-03T15:20:59,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741951_1127 (size=30955) 2024-12-03T15:20:59,457 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/433a74e1f2e54ffcbb0e51a0fc94dd1d 2024-12-03T15:20:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:20:59,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-03T15:20:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:20:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:59,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9aa21d240b0b40c5ab92cbb82ed1430f is 50, key is test_row_0/B:col10/1733239259221/Put/seqid=0 2024-12-03T15:20:59,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741952_1128 (size=12001) 2024-12-03T15:20:59,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9aa21d240b0b40c5ab92cbb82ed1430f 2024-12-03T15:20:59,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239319585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239319585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239319594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239319595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239319602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-03T15:20:59,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:20:59,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:20:59,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:20:59,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f1a832fa3446f8b1c10e7ebb187236 is 50, key is test_row_0/C:col10/1733239259221/Put/seqid=0 2024-12-03T15:20:59,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741953_1129 (size=12001) 2024-12-03T15:20:59,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f1a832fa3446f8b1c10e7ebb187236 2024-12-03T15:20:59,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/433a74e1f2e54ffcbb0e51a0fc94dd1d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d 2024-12-03T15:20:59,749 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d, entries=150, sequenceid=16, filesize=30.2 K 2024-12-03T15:20:59,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9aa21d240b0b40c5ab92cbb82ed1430f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f 2024-12-03T15:20:59,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f, entries=150, sequenceid=16, filesize=11.7 K 2024-12-03T15:20:59,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f1a832fa3446f8b1c10e7ebb187236 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236 2024-12-03T15:20:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:20:59,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236, entries=150, sequenceid=16, filesize=11.7 K 2024-12-03T15:20:59,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 576ms, sequenceid=16, compaction requested=false 2024-12-03T15:20:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-03T15:20:59,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:20:59,821 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-03T15:20:59,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:20:59,822 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:20:59,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:20:59,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034eeec2cab9cb4912aefc810aedf5f1f9_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239259267/Put/seqid=0 2024-12-03T15:20:59,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 
9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:20:59,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:20:59,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741954_1130 (size=12154) 2024-12-03T15:20:59,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239319904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239319905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239319909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239319913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:20:59,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:20:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239319916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239320011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239320011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239320016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239320019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239320216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239320219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239320221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239320230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:21:00,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:00,305 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034eeec2cab9cb4912aefc810aedf5f1f9_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034eeec2cab9cb4912aefc810aedf5f1f9_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:00,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/94fe0164503f44689270a692f2aa9944, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:00,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/94fe0164503f44689270a692f2aa9944 is 175, key is test_row_0/A:col10/1733239259267/Put/seqid=0 2024-12-03T15:21:00,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741955_1131 (size=30955) 2024-12-03T15:21:00,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239320424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,520 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T15:21:00,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239320521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239320528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239320529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:00,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239320544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:00,778 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/94fe0164503f44689270a692f2aa9944 2024-12-03T15:21:00,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ab5d649fcfe740c19c26758ad26d35ce is 50, key is test_row_0/B:col10/1733239259267/Put/seqid=0 2024-12-03T15:21:00,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741956_1132 (size=12001) 2024-12-03T15:21:01,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:01,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239321029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:01,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:01,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239321031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:01,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239321035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:01,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239321052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:01,241 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ab5d649fcfe740c19c26758ad26d35ce 2024-12-03T15:21:01,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/9f2571f2b6ad4a898c8d62b856765194 is 50, key is test_row_0/C:col10/1733239259267/Put/seqid=0 2024-12-03T15:21:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:21:01,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741957_1133 (size=12001) 2024-12-03T15:21:01,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:01,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239321444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:01,714 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/9f2571f2b6ad4a898c8d62b856765194 2024-12-03T15:21:01,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/94fe0164503f44689270a692f2aa9944 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944 2024-12-03T15:21:01,735 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944, entries=150, sequenceid=40, filesize=30.2 K 2024-12-03T15:21:01,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ab5d649fcfe740c19c26758ad26d35ce as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce 2024-12-03T15:21:01,753 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce, entries=150, sequenceid=40, filesize=11.7 K 2024-12-03T15:21:01,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/9f2571f2b6ad4a898c8d62b856765194 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194 2024-12-03T15:21:01,762 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194, entries=150, sequenceid=40, filesize=11.7 K 2024-12-03T15:21:01,774 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1947ms, sequenceid=40, compaction requested=false 2024-12-03T15:21:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:01,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-03T15:21:01,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-03T15:21:01,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-03T15:21:01,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5910 sec 2024-12-03T15:21:01,790 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 2.6110 sec 2024-12-03T15:21:02,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:02,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:21:02,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:02,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:02,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:02,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:02,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:02,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:02,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038c50b528b1eb4c19a21dc47be41db60f_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:02,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239322087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239322090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239322090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239322092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741958_1134 (size=12154) 2024-12-03T15:21:02,131 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:02,141 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038c50b528b1eb4c19a21dc47be41db60f_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038c50b528b1eb4c19a21dc47be41db60f_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:02,145 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/2290a360b5fd447bb86f4904772b5ba0, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:02,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/2290a360b5fd447bb86f4904772b5ba0 is 175, key is test_row_0/A:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:02,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741959_1135 (size=30955) 2024-12-03T15:21:02,173 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/2290a360b5fd447bb86f4904772b5ba0 2024-12-03T15:21:02,181 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-03T15:21:02,183 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-03T15:21:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239322201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239322203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239322204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239322206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9b37596b6e214282870fb50e4600b87c is 50, key is test_row_0/B:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:02,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741960_1136 (size=12001) 2024-12-03T15:21:02,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239322408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239322409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239322412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239322412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9b37596b6e214282870fb50e4600b87c 2024-12-03T15:21:02,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/bf80d25df5164aa5b95a09b5526eff60 is 50, key is test_row_0/C:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:02,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239322719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239322720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741961_1137 (size=12001) 2024-12-03T15:21:02,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239322723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:02,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:02,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239322725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/bf80d25df5164aa5b95a09b5526eff60 2024-12-03T15:21:03,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/2290a360b5fd447bb86f4904772b5ba0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0 2024-12-03T15:21:03,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0, entries=150, sequenceid=54, filesize=30.2 K 2024-12-03T15:21:03,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/9b37596b6e214282870fb50e4600b87c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c 2024-12-03T15:21:03,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c, entries=150, sequenceid=54, filesize=11.7 K 2024-12-03T15:21:03,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/bf80d25df5164aa5b95a09b5526eff60 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60 2024-12-03T15:21:03,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60, entries=150, sequenceid=54, filesize=11.7 K 2024-12-03T15:21:03,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1193ms, sequenceid=54, compaction requested=true 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:03,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-03T15:21:03,233 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:03,234 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:03,264 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:03,264 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:03,264 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:03,264 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=90.7 K 2024-12-03T15:21:03,264 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,264 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0] 2024-12-03T15:21:03,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:03,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:03,277 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 
2024-12-03T15:21:03,277 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:03,278 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,278 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=35.2 K 2024-12-03T15:21:03,278 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 433a74e1f2e54ffcbb0e51a0fc94dd1d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239259217 2024-12-03T15:21:03,284 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 90f1a832fa3446f8b1c10e7ebb187236, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239259217 2024-12-03T15:21:03,284 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94fe0164503f44689270a692f2aa9944, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239259254 2024-12-03T15:21:03,285 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f2571f2b6ad4a898c8d62b856765194, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239259254 2024-12-03T15:21:03,286 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2290a360b5fd447bb86f4904772b5ba0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733239259900 2024-12-03T15:21:03,288 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting bf80d25df5164aa5b95a09b5526eff60, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733239259900 2024-12-03T15:21:03,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-03T15:21:03,301 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-03T15:21:03,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-03T15:21:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:03,309 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:03,310 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:03,310 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239323313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239323315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120374ae8e89d2a04bd2bcfc94dab58ea552_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239262076/Put/seqid=0 2024-12-03T15:21:03,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239323321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239323324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,348 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:03,349 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/bd021f76dccf4eab9ba1c57742ca695d is 50, key is test_row_0/C:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:03,364 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:03,386 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203cf6330d19cee4329ac130da1e7032c3d_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:03,400 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203cf6330d19cee4329ac130da1e7032c3d_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:03,401 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cf6330d19cee4329ac130da1e7032c3d_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:03,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:03,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741962_1138 (size=17034) 2024-12-03T15:21:03,423 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:03,429 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120374ae8e89d2a04bd2bcfc94dab58ea552_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120374ae8e89d2a04bd2bcfc94dab58ea552_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:03,430 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9a14ff5672ec4dadbe335023bb9d0d26, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:03,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9a14ff5672ec4dadbe335023bb9d0d26 is 175, key is test_row_0/A:col10/1733239262076/Put/seqid=0 2024-12-03T15:21:03,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741963_1139 (size=12104) 2024-12-03T15:21:03,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239323436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239323439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239323442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239323442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,454 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/bd021f76dccf4eab9ba1c57742ca695d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bd021f76dccf4eab9ba1c57742ca695d 2024-12-03T15:21:03,462 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:03,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:03,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741964_1140 (size=4469) 2024-12-03T15:21:03,491 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into bd021f76dccf4eab9ba1c57742ca695d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:03,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:03,491 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=13, startTime=1733239263233; duration=0sec 2024-12-03T15:21:03,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:03,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:03,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:03,492 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#119 average throughput is 0.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:03,494 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/408d7fcad34847b28645a8dfa9d2614b is 175, key is test_row_0/A:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:03,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:03,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:03,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=35.2 K 2024-12-03T15:21:03,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741965_1141 (size=48139) 2024-12-03T15:21:03,542 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9aa21d240b0b40c5ab92cbb82ed1430f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239259217 2024-12-03T15:21:03,542 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9a14ff5672ec4dadbe335023bb9d0d26 2024-12-03T15:21:03,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239323538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,545 DEBUG [Thread-632 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4281 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:03,552 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ab5d649fcfe740c19c26758ad26d35ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239259254 2024-12-03T15:21:03,553 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b37596b6e214282870fb50e4600b87c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, 
earliestPutTs=1733239259900 2024-12-03T15:21:03,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741966_1142 (size=31058) 2024-12-03T15:21:03,575 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/408d7fcad34847b28645a8dfa9d2614b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b 2024-12-03T15:21:03,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/fa25bb42f464472cb447a3ad1df9d6b3 is 50, key is test_row_0/B:col10/1733239262076/Put/seqid=0 2024-12-03T15:21:03,584 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 408d7fcad34847b28645a8dfa9d2614b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:03,584 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:03,585 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=13, startTime=1733239263233; duration=0sec 2024-12-03T15:21:03,585 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:03,585 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:03,600 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:03,601 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/5c976c89f3424a9d91871f0e30076443 is 50, key is test_row_0/B:col10/1733239262037/Put/seqid=0 2024-12-03T15:21:03,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:03,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:03,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:03,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
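The pid=42 entries above show a master-dispatched flush procedure colliding with a flush that MemStoreFlusher already has in progress: FlushRegionCallable throws "Unable to complete flush ... as already flushing", the region server reports the failure through RemoteProcedureResultReporter, and the master logs "Remote procedure failed, pid=42" and re-dispatches the same callable until the region can be flushed. A minimal sketch of the client-side call that typically drives such a procedure; the table name is taken from the log, while the class name and setup are assumptions for illustration only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure against every region of the
      // table. If a region is already flushing, the region-side callable fails
      // with "Unable to complete flush" and the master retries the procedure,
      // which is the loop visible in the pid=42 log entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```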
2024-12-03T15:21:03,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741967_1143 (size=12001) 2024-12-03T15:21:03,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239323647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239323647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239323647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239323648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741968_1144 (size=12104) 2024-12-03T15:21:03,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:03,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:03,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:03,933 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:03,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:03,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:03,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
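The RegionTooBusyException warnings in this stretch come from HRegion.checkResources: writes are rejected once a region's memstore passes its blocking size, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit reported here implies the test runs with a deliberately small flush size; the sketch below shows one configuration that would produce that limit. The specific byte values are an assumption and are not read from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfSketch {
  /** Hypothetical settings that would yield the 512 KB blocking limit seen above. */
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Writers are blocked once a region's memstore exceeds
    // flush.size * block.multiplier; 128 KB * 4 = 512 KB with these values.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```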
2024-12-03T15:21:03,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:03,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239323952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239323953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239323953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:03,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:03,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239323954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/fa25bb42f464472cb447a3ad1df9d6b3 2024-12-03T15:21:04,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/aae833c64287412cadb6d419fa949d1b is 50, key is test_row_0/C:col10/1733239262076/Put/seqid=0 2024-12-03T15:21:04,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741969_1145 (size=12001) 2024-12-03T15:21:04,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/aae833c64287412cadb6d419fa949d1b 2024-12-03T15:21:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9a14ff5672ec4dadbe335023bb9d0d26 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26 2024-12-03T15:21:04,061 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/5c976c89f3424a9d91871f0e30076443 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5c976c89f3424a9d91871f0e30076443 2024-12-03T15:21:04,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26, entries=250, sequenceid=78, filesize=47.0 K 2024-12-03T15:21:04,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/fa25bb42f464472cb447a3ad1df9d6b3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3 2024-12-03T15:21:04,080 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 5c976c89f3424a9d91871f0e30076443(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:04,080 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:04,080 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=13, startTime=1733239263233; duration=0sec 2024-12-03T15:21:04,080 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:04,080 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:04,085 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:21:04,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/aae833c64287412cadb6d419fa949d1b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b 2024-12-03T15:21:04,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:04,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:04,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:04,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:04,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:04,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
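The "Completed compaction of 3 (all) file(s)" lines for stores A and B, together with the earlier PressureAwareThroughputController entry ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second"), reflect compaction output being rate limited between a lower and an upper throughput bound, with the effective limit rising as flush pressure grows. A hedged sketch of how those bounds are configured and how a compaction can be requested explicitly; the byte values shown are illustrative defaults, not values taken from this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionThrottleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Compaction writes are throttled between these two bounds; the effective
    // limit moves toward the higher bound under memstore/flush pressure.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Explicitly request a major compaction; the server-side runner is what
      // logs the "Completed compaction ... This selection was in queue for" lines.
      admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```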
2024-12-03T15:21:04,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
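On the client side, RegionTooBusyException is normally retried automatically, with the retry count and back-off governed by hbase.client.retries.number and hbase.client.pause; the repeated Mutate callIds with fresh deadlines above are consistent with that behaviour. The sketch below is only an illustration of the back-off idea for the case where the exception does reach application code (for example after retries are exhausted); the helper name and parameters are assumptions, while the row, family and qualifier mirror the keys visible in the log:

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  /** Retry a put a few times when the region reports it is too busy. */
  static void putWithRetry(Table table, Put put, int maxAttempts, long pauseMs)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // Memstore is above its blocking limit; back off and let the flush drain it.
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(pauseMs * attempt);
      }
    }
  }

  static Put examplePut() {
    // Row, family and qualifier mirror the log (test_row_0, family A, col10);
    // the value is arbitrary.
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
  }
}
```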
2024-12-03T15:21:04,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:21:04,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 821ms, sequenceid=78, compaction requested=false 2024-12-03T15:21:04,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:04,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:04,244 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:04,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034142a46a1e414dfb9a159ba8b9985369_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239263319/Put/seqid=0 2024-12-03T15:21:04,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741970_1146 (size=12154) 2024-12-03T15:21:04,258 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:04,273 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034142a46a1e414dfb9a159ba8b9985369_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034142a46a1e414dfb9a159ba8b9985369_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/072a26d4ec5f4e35b6b8344926b49e4d, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:04,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/072a26d4ec5f4e35b6b8344926b49e4d is 175, key is test_row_0/A:col10/1733239263319/Put/seqid=0 2024-12-03T15:21:04,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741971_1147 (size=30955) 2024-12-03T15:21:04,287 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/072a26d4ec5f4e35b6b8344926b49e4d 2024-12-03T15:21:04,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7f856b5814a74669902b8a6d2cee2040 is 50, key is test_row_0/B:col10/1733239263319/Put/seqid=0 2024-12-03T15:21:04,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741972_1148 (size=12001) 2024-12-03T15:21:04,317 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7f856b5814a74669902b8a6d2cee2040 2024-12-03T15:21:04,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c2ca57fb18cc4da5bf409a817e9eced7 is 50, key is test_row_0/C:col10/1733239263319/Put/seqid=0 2024-12-03T15:21:04,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741973_1149 (size=12001) 2024-12-03T15:21:04,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:04,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:04,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:04,524 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239324521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239324524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239324525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239324530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239324626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239324628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239324635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239324635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,742 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c2ca57fb18cc4da5bf409a817e9eced7 2024-12-03T15:21:04,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/072a26d4ec5f4e35b6b8344926b49e4d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d 2024-12-03T15:21:04,755 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d, entries=150, sequenceid=93, filesize=30.2 K 2024-12-03T15:21:04,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7f856b5814a74669902b8a6d2cee2040 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040 2024-12-03T15:21:04,766 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:21:04,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c2ca57fb18cc4da5bf409a817e9eced7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7 2024-12-03T15:21:04,779 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:21:04,780 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 536ms, sequenceid=93, compaction requested=true 2024-12-03T15:21:04,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:04,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:04,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-03T15:21:04,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-03T15:21:04,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-03T15:21:04,785 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4730 sec 2024-12-03T15:21:04,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 1.4790 sec 2024-12-03T15:21:04,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:04,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:04,831 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:04,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:04,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:04,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:04,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239324844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239324845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034d63e471683d4f1bac0e118c7076ffea_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:04,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239324846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239324847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741974_1150 (size=14594) 2024-12-03T15:21:04,897 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:04,903 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034d63e471683d4f1bac0e118c7076ffea_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034d63e471683d4f1bac0e118c7076ffea_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:04,906 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7e02c0ad37f24498a5bd7b65b1931a90, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:04,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7e02c0ad37f24498a5bd7b65b1931a90 is 175, key is test_row_0/A:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:04,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is 
added to blk_1073741975_1151 (size=39549) 2024-12-03T15:21:04,938 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7e02c0ad37f24498a5bd7b65b1931a90 2024-12-03T15:21:04,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239324948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239324950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239324951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:04,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239324954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:04,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d220a48c1f634a53a1694642a5e9b953 is 50, key is test_row_0/B:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:04,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741976_1152 (size=12001) 2024-12-03T15:21:04,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d220a48c1f634a53a1694642a5e9b953 2024-12-03T15:21:05,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/3ac97fd7d478471db63ad0c7fec153ba is 50, key is test_row_0/C:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:05,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741977_1153 (size=12001) 2024-12-03T15:21:05,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239325153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239325161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239325165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239325166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-03T15:21:05,418 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-03T15:21:05,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:05,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-03T15:21:05,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-03T15:21:05,431 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:05,432 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:05,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:05,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239325465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239325469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239325473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/3ac97fd7d478471db63ad0c7fec153ba 2024-12-03T15:21:05,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:05,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239325481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7e02c0ad37f24498a5bd7b65b1931a90 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90 2024-12-03T15:21:05,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90, entries=200, sequenceid=119, filesize=38.6 K 2024-12-03T15:21:05,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d220a48c1f634a53a1694642a5e9b953 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953 2024-12-03T15:21:05,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953, entries=150, sequenceid=119, filesize=11.7 K 2024-12-03T15:21:05,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/3ac97fd7d478471db63ad0c7fec153ba as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba 2024-12-03T15:21:05,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-03T15:21:05,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba, entries=150, sequenceid=119, filesize=11.7 K 
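The burst of RegionTooBusyException entries above is HRegion.checkResources() rejecting Mutate RPCs while the region's memstore sits above its blocking limit (512.0 K in this run, a deliberately small test setting); that limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and writes are accepted again once the in-flight flush drains the memstore, as the subsequent "Added .../B/..." and "Added .../C/..." entries show. The stock client already retries this exception on its own; the sketch below is only a minimal illustration of handling it explicitly for a plain Table.put caller. The table, row, family and qualifier names are taken from the log; the value and the backoff numbers are illustrative assumptions.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family and qualifier as seen in the flushed cells (A:col10).
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // illustrative starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);                   // rejected with RegionTooBusyException while blocked
          return;                           // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IOException("put did not succeed after retries");
    }
  }
}
```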
2024-12-03T15:21:05,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 706ms, sequenceid=119, compaction requested=true 2024-12-03T15:21:05,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:05,537 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:05,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:05,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:05,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:05,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:05,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:05,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:05,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:05,545 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 149701 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:05,545 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:05,545 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
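The flush summary above (dataSize ~154.31 KB written in 706ms at sequenceid=119) is immediately followed by compaction requests for all three stores, since each now holds four HFiles: SortedCompactionPolicy reports "4 eligible, 16 blocking", i.e. enough files to qualify for a minor compaction and well under the blocking-store-files ceiling. The sketch below lists the configuration keys believed to govern these thresholds, populated with what are thought to be the stock defaults; the test itself clearly overrides the flush size to arrive at the 512 K blocking limit, so treat the values as illustrative rather than as settings read from this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch of the knobs behind the behaviour logged above; values are
// the usual defaults, not settings recovered from this test run.
public class FlushCompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore flush threshold and the multiplier that yields the blocking limit
    // reported in the RegionTooBusyException messages (flush.size * block.multiplier).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Store-file counts that drive the "4 eligible, 16 blocking" selection above.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Size ratio used by ExploringCompactionPolicy when picking candidate files.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    return conf;
  }
}
```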
2024-12-03T15:21:05,545 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=146.2 K 2024-12-03T15:21:05,545 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:05,546 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90] 2024-12-03T15:21:05,546 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 408d7fcad34847b28645a8dfa9d2614b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733239259900 2024-12-03T15:21:05,546 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:05,547 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:05,547 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
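For store A the ExploringCompactionPolicy has selected all four HFiles (146.2 K total) for a minor compaction, while store B is being selected in parallel on the long-compactions thread. The flushes earlier in the log were requested from the client through the master (HMaster flush TestAcidGuarantees, FlushTableProcedure pid=41/43); a minimal sketch of issuing the same flush, plus an explicit compaction request, through the public Admin API is shown below, assuming a standard client Connection. Only the table and family names come from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a FlushTableProcedure on the master, like the pid=41/43 entries above.
      admin.flush(table);
      // Queues a compaction request for family A; the region server still applies
      // its own selection policy, exactly as the CompactSplit lines above show.
      admin.compact(table, Bytes.toBytes("A"));
    }
  }
}
```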
2024-12-03T15:21:05,547 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a14ff5672ec4dadbe335023bb9d0d26, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239262076 2024-12-03T15:21:05,547 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5c976c89f3424a9d91871f0e30076443, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=47.0 K 2024-12-03T15:21:05,548 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c976c89f3424a9d91871f0e30076443, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733239259900 2024-12-03T15:21:05,548 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 072a26d4ec5f4e35b6b8344926b49e4d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239263313 2024-12-03T15:21:05,548 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting fa25bb42f464472cb447a3ad1df9d6b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239262076 2024-12-03T15:21:05,548 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e02c0ad37f24498a5bd7b65b1931a90, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:05,549 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f856b5814a74669902b8a6d2cee2040, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239263313 2024-12-03T15:21:05,550 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d220a48c1f634a53a1694642a5e9b953, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:05,560 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:05,563 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203a256dee9058a4d8493fce551b0280761_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:05,567 
DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203a256dee9058a4d8493fce551b0280761_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:05,567 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203a256dee9058a4d8493fce551b0280761_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:05,573 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#130 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:05,574 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/72f470fda9354669b542b36edf2630e1 is 50, key is test_row_0/B:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:05,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:05,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-03T15:21:05,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:05,586 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-03T15:21:05,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:05,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:05,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:05,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:05,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:05,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741978_1154 (size=4469) 2024-12-03T15:21:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120379a28755050444f8a7bc0da517e90cf1_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239264844/Put/seqid=0 2024-12-03T15:21:05,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741979_1155 (size=12241) 2024-12-03T15:21:05,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741980_1156 (size=12154) 2024-12-03T15:21:05,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:05,712 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120379a28755050444f8a7bc0da517e90cf1_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120379a28755050444f8a7bc0da517e90cf1_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:05,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3f1c81f4eb5949cb9517f71d18f7bdb8, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:05,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3f1c81f4eb5949cb9517f71d18f7bdb8 is 175, key is test_row_0/A:col10/1733239264844/Put/seqid=0 2024-12-03T15:21:05,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-03T15:21:05,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741981_1157 (size=30955) 2024-12-03T15:21:05,743 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3f1c81f4eb5949cb9517f71d18f7bdb8 2024-12-03T15:21:05,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/09326e17b1144aea9c5becd83ee5bcb3 is 50, key is test_row_0/B:col10/1733239264844/Put/seqid=0 2024-12-03T15:21:05,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741982_1158 (size=12001) 2024-12-03T15:21:05,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:06,016 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#129 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:06,017 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9fd126a698b342a1b36b2c33758ded19 is 175, key is test_row_0/A:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:06,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-03T15:21:06,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,046 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/72f470fda9354669b542b36edf2630e1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72f470fda9354669b542b36edf2630e1 2024-12-03T15:21:06,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741983_1159 (size=31195) 2024-12-03T15:21:06,060 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 72f470fda9354669b542b36edf2630e1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:06,060 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:06,060 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=12, startTime=1733239265538; duration=0sec 2024-12-03T15:21:06,060 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:06,061 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:06,061 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:06,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:21:06,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-03T15:21:06,070 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:06,070 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:06,070 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:06,071 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bd021f76dccf4eab9ba1c57742ca695d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=47.0 K 2024-12-03T15:21:06,071 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting bd021f76dccf4eab9ba1c57742ca695d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733239259900 2024-12-03T15:21:06,072 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting aae833c64287412cadb6d419fa949d1b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239262076 2024-12-03T15:21:06,073 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9fd126a698b342a1b36b2c33758ded19 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19 2024-12-03T15:21:06,073 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c2ca57fb18cc4da5bf409a817e9eced7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239263313 2024-12-03T15:21:06,073 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ac97fd7d478471db63ad0c7fec153ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:06,080 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 9fd126a698b342a1b36b2c33758ded19(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:06,080 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:06,080 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=12, startTime=1733239265537; duration=0sec 2024-12-03T15:21:06,080 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:06,080 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:06,095 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#133 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:06,095 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/dcfb6a22d56943f3b73bf0c82544810b is 50, key is test_row_0/C:col10/1733239264514/Put/seqid=0 2024-12-03T15:21:06,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741984_1160 (size=12241) 2024-12-03T15:21:06,150 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,202 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/09326e17b1144aea9c5becd83ee5bcb3 2024-12-03T15:21:06,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7c48f5a384e74ff1830704299ecb0bb1 is 50, key is test_row_0/C:col10/1733239264844/Put/seqid=0 2024-12-03T15:21:06,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741985_1161 (size=12001) 2024-12-03T15:21:06,247 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7c48f5a384e74ff1830704299ecb0bb1 2024-12-03T15:21:06,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3f1c81f4eb5949cb9517f71d18f7bdb8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8 2024-12-03T15:21:06,263 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8, entries=150, sequenceid=129, filesize=30.2 K 2024-12-03T15:21:06,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/09326e17b1144aea9c5becd83ee5bcb3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3 2024-12-03T15:21:06,278 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3, entries=150, sequenceid=129, filesize=11.7 K 2024-12-03T15:21:06,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7c48f5a384e74ff1830704299ecb0bb1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1 2024-12-03T15:21:06,288 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1, entries=150, sequenceid=129, filesize=11.7 K 2024-12-03T15:21:06,292 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 706ms, sequenceid=129, compaction requested=false 2024-12-03T15:21:06,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:06,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:06,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-03T15:21:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-03T15:21:06,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-03T15:21:06,297 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 862 msec 2024-12-03T15:21:06,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 869 msec 2024-12-03T15:21:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:06,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:06,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:06,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203d61826664f274b12aacab8c0ef3cb402_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239266353/Put/seqid=0 2024-12-03T15:21:06,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741986_1162 (size=12304) 2024-12-03T15:21:06,398 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:06,404 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203d61826664f274b12aacab8c0ef3cb402_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203d61826664f274b12aacab8c0ef3cb402_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:06,405 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/4486fe8128af4a9b80a5475dc6f70099, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:06,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/4486fe8128af4a9b80a5475dc6f70099 is 175, key is test_row_0/A:col10/1733239266353/Put/seqid=0 2024-12-03T15:21:06,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741987_1163 (size=31105) 2024-12-03T15:21:06,443 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=60.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/4486fe8128af4a9b80a5475dc6f70099 2024-12-03T15:21:06,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/82850905492f41fb942afc4e5fd1cf5d is 50, key is test_row_0/B:col10/1733239266353/Put/seqid=0 2024-12-03T15:21:06,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741988_1164 (size=12151) 2024-12-03T15:21:06,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/82850905492f41fb942afc4e5fd1cf5d 2024-12-03T15:21:06,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-03T15:21:06,535 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-03T15:21:06,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:06,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-03T15:21:06,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:06,556 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:06,556 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/dcfb6a22d56943f3b73bf0c82544810b as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/dcfb6a22d56943f3b73bf0c82544810b 2024-12-03T15:21:06,557 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:06,557 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:06,567 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into dcfb6a22d56943f3b73bf0c82544810b(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:06,567 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:06,567 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=12, startTime=1733239265538; duration=0sec 2024-12-03T15:21:06,567 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:06,567 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:06,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7d6b52b70d4342198a45d56886474f9f is 50, key is test_row_0/C:col10/1733239266353/Put/seqid=0 2024-12-03T15:21:06,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741989_1165 (size=12151) 2024-12-03T15:21:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:06,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,710 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,710 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-03T15:21:06,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:06,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:06,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:06,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:06,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:06,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:06,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:06,863 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-03T15:21:06,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:06,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:06,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:06,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:06,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:06,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:06,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239326973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239326975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239326975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:06,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:06,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239326976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7d6b52b70d4342198a45d56886474f9f 2024-12-03T15:21:07,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/4486fe8128af4a9b80a5475dc6f70099 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099 2024-12-03T15:21:07,017 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-03T15:21:07,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:07,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:07,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:07,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:07,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:07,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:07,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099, entries=150, sequenceid=161, filesize=30.4 K 2024-12-03T15:21:07,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/82850905492f41fb942afc4e5fd1cf5d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d 2024-12-03T15:21:07,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d, entries=150, sequenceid=161, filesize=11.9 K 2024-12-03T15:21:07,036 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/7d6b52b70d4342198a45d56886474f9f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f 2024-12-03T15:21:07,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f, entries=150, sequenceid=161, filesize=11.9 K 2024-12-03T15:21:07,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 696ms, sequenceid=161, compaction requested=true 2024-12-03T15:21:07,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:07,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,054 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:07,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,056 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:07,056 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:07,056 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:07,056 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=91.1 K 2024-12-03T15:21:07,056 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:07,056 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099] 2024-12-03T15:21:07,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:07,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:07,056 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fd126a698b342a1b36b2c33758ded19, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,057 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,057 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f1c81f4eb5949cb9517f71d18f7bdb8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239264842 2024-12-03T15:21:07,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:07,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:07,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:07,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:07,058 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4486fe8128af4a9b80a5475dc6f70099, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:07,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,058 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:07,058 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:07,058 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:07,058 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72f470fda9354669b542b36edf2630e1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=35.5 K 2024-12-03T15:21:07,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,059 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 72f470fda9354669b542b36edf2630e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:07,060 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 09326e17b1144aea9c5becd83ee5bcb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239264842 2024-12-03T15:21:07,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,060 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 82850905492f41fb942afc4e5fd1cf5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:07,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,067 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:07,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,107 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#139 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:07,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,108 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/cf9e42e653744e9e8ac57848d1c19f4c is 50, key is test_row_0/B:col10/1733239266353/Put/seqid=0 2024-12-03T15:21:07,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,109 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203895b1460ec4b401786921107ed78cc22_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:07,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,112 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203895b1460ec4b401786921107ed78cc22_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,112 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203895b1460ec4b401786921107ed78cc22_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741990_1166 (size=12493) 2024-12-03T15:21:07,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:21:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:21:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:07,167 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/cf9e42e653744e9e8ac57848d1c19f4c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/cf9e42e653744e9e8ac57848d1c19f4c 2024-12-03T15:21:07,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-03T15:21:07,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:07,172 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:07,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,181 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 
cf9e42e653744e9e8ac57848d1c19f4c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:07,181 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:07,182 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=13, startTime=1733239267056; duration=0sec 2024-12-03T15:21:07,182 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:07,182 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,182 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,186 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:07,186 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:07,187 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:07,187 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/dcfb6a22d56943f3b73bf0c82544810b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=35.5 K 2024-12-03T15:21:07,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,187 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting dcfb6a22d56943f3b73bf0c82544810b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733239264514 2024-12-03T15:21:07,188 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c48f5a384e74ff1830704299ecb0bb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239264842 2024-12-03T15:21:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,188 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d6b52b70d4342198a45d56886474f9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:07,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[Repeated DEBUG records from the RpcServer.default.FPBQ.Fifo handlers 0-2 (port=46815), all reading "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" and recurring at millisecond intervals from 2024-12-03T15:21:07,194 through 15:21:07,368, are omitted below; the remaining records of that interval follow, one per line.]
2024-12-03T15:21:07,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741991_1167 (size=4469)
2024-12-03T15:21:07,200 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#138 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T15:21:07,201 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9508a975d7c94d2ca91d61b28327bb4c is 175, key is test_row_0/A:col10/1733239266353/Put/seqid=0
2024-12-03T15:21:07,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120330b21585141540efbc98d44664d5b654_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239266361/Put/seqid=0
2024-12-03T15:21:07,228 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T15:21:07,229 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/ddff822ea81441e5b386897734fef4d8 is 50, key is test_row_0/C:col10/1733239266353/Put/seqid=0
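The two PressureAwareThroughputController entries above show compaction for stores A and C being throttled against the region server's 50.00 MB/second total limit. As a hedged illustration only (none of this code appears in the log), the sketch below shows how that bound is commonly tuned; the configuration key names are my assumption of the usual HBase 2.x settings and should be checked against the version under test.

// Sketch: tuning the pressure-aware compaction throughput bounds (assumed HBase 2.x key names).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Bounds, in bytes/second, between which the controller scales compaction
    // throughput according to memstore pressure (assumed key names).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}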
2024-12-03T15:21:07,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741992_1168 (size=31447)
2024-12-03T15:21:07,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741993_1169 (size=9814)
2024-12-03T15:21:07,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:07,282 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120330b21585141540efbc98d44664d5b654_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120330b21585141540efbc98d44664d5b654_9cb1ca4e5b5289fcc2a0bafc5801cb91
2024-12-03T15:21:07,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0c294f50c654decb15f2b4f4edc8b1a, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91]
2024-12-03T15:21:07,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0c294f50c654decb15f2b4f4edc8b1a is 175, key is test_row_0/A:col10/1733239266361/Put/seqid=0
2024-12-03T15:21:07,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741994_1170 (size=12493)
2024-12-03T15:21:07,299 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/ddff822ea81441e5b386897734fef4d8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/ddff822ea81441e5b386897734fef4d8
2024-12-03T15:21:07,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741995_1171 (size=22461)
2024-12-03T15:21:07,307 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0c294f50c654decb15f2b4f4edc8b1a
2024-12-03T15:21:07,312 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into ddff822ea81441e5b386897734fef4d8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T15:21:07,312 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91:
2024-12-03T15:21:07,312 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=13, startTime=1733239267058; duration=0sec
2024-12-03T15:21:07,312 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:21:07,312 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C
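The repeated StoreFileTrackerFactory lines in this window come from the RPC handler threads and always resolve to DefaultStoreFileTracker. As a hedged sketch only (the property name is my assumption, based on the HBase store file tracking feature, and is not taken from this log), the tracker implementation can be pinned when the table descriptor is built:

// Sketch: pinning the store file tracker implementation on a table descriptor.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileTrackerSketch {
  public static TableDescriptor withDefaultTracker() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        // "DEFAULT" maps to DefaultStoreFileTracker, matching the DEBUG lines above
        // (assumed property name from the store file tracking feature).
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        .build();
  }
}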
2024-12-03T15:21:07,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/2d86c9118a9d44488090e0338cd07aad is 50, key is test_row_0/B:col10/1733239266361/Put/seqid=0
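The RS_FLUSH_REGIONS entries (pid=46) running through this stretch trace a memstore flush of region 9cb1ca4e5b5289fcc2a0bafc5801cb91 at sequenceid=168, including the MOB file rename and the A and B family store files. For orientation only, a hedged sketch of how such a flush can be requested from a client (the connection setup is assumed; the test harness drives its own flushes):

// Sketch: asking the cluster to flush the test table (assumed client-side usage).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Forces memstore contents (including MOB cells) into new store files,
      // which is what the DefaultStoreFlusher/HMobStore records here describe.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}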
2024-12-03T15:21:07,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741996_1172 (size=9757) 2024-12-03T15:21:07,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,358 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=168 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/2d86c9118a9d44488090e0338cd07aad 2024-12-03T15:21:07,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,368 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b7686a3051c64ea48ebbcf8870661af4 is 50, key is test_row_0/C:col10/1733239266361/Put/seqid=0 2024-12-03T15:21:07,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741997_1173 (size=9757) 2024-12-03T15:21:07,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:07,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
as already flushing 2024-12-03T15:21:07,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:07,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239327622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239327622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239327627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239327627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239327630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:07,670 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/9508a975d7c94d2ca91d61b28327bb4c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c 2024-12-03T15:21:07,678 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 9508a975d7c94d2ca91d61b28327bb4c(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:07,678 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:07,678 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=13, startTime=1733239267054; duration=0sec 2024-12-03T15:21:07,678 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:07,678 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:07,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239327735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239327735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239327737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239327742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239327754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,816 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b7686a3051c64ea48ebbcf8870661af4 2024-12-03T15:21:07,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0c294f50c654decb15f2b4f4edc8b1a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a 2024-12-03T15:21:07,837 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a, entries=100, sequenceid=168, filesize=21.9 K 2024-12-03T15:21:07,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/2d86c9118a9d44488090e0338cd07aad as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad 2024-12-03T15:21:07,844 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad, entries=100, sequenceid=168, filesize=9.5 K 2024-12-03T15:21:07,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b7686a3051c64ea48ebbcf8870661af4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4 2024-12-03T15:21:07,851 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4, entries=100, sequenceid=168, filesize=9.5 K 2024-12-03T15:21:07,855 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 684ms, sequenceid=168, compaction requested=false 2024-12-03T15:21:07,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:07,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:07,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-03T15:21:07,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-03T15:21:07,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-03T15:21:07,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3000 sec 2024-12-03T15:21:07,860 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.3140 sec 2024-12-03T15:21:07,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:07,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
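The run of WARN/DEBUG entries above records the write-blocking cycle this test exercises: once the region's memstore passes its blocking limit (reported here as 512.0 K), incoming Mutate calls are rejected with RegionTooBusyException until the in-flight flush drains the memstore, after which writes are accepted again. The stock HBase client normally retries such rejections on its own (governed by settings like hbase.client.retries.number and hbase.client.pause); the sketch below writes that retry loop out explicitly for a single Put so the failure mode in the log is easier to follow. It is only an illustration against the public client API: the table name, row key, family and qualifier are taken from this log, while the value, retry count and back-off figures are assumptions.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            // Row key, family and qualifier appear in the log; the value is a placeholder.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put); // rejected while the memstore is over its blocking limit
                    break;          // accepted once the flush has drained the memstore
                } catch (IOException e) {
                    // Depending on client retry settings the busy signal may surface directly
                    // or wrapped, so walk the cause chain (assumption, not version-specific).
                    boolean busy = false;
                    for (Throwable t = e; t != null; t = t.getCause()) {
                        if (t instanceof RegionTooBusyException) { busy = true; break; }
                    }
                    if (!busy || attempt >= 10) {
                        throw e; // not a busy-region rejection, or retries exhausted
                    }
                    Thread.sleep(backoffMs); // back off and let the flush catch up
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}
```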
2024-12-03T15:21:07,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:07,955 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239327951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239327953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239327954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239327958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:07,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239327958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:07,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120325b8baeb88f24c8a94560caa0d388e08_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741998_1174 (size=12304) 2024-12-03T15:21:08,014 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,023 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120325b8baeb88f24c8a94560caa0d388e08_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120325b8baeb88f24c8a94560caa0d388e08_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:08,034 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0966d13702914586ab2a062a69601b45, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0966d13702914586ab2a062a69601b45 is 175, key is test_row_0/A:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741999_1175 (size=31105) 2024-12-03T15:21:08,053 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=203, memsize=64.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0966d13702914586ab2a062a69601b45 2024-12-03T15:21:08,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239328060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239328063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239328068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7bd31faaa23f4b5687943c14f005eb4e is 50, key is test_row_0/B:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742000_1176 (size=12151) 2024-12-03T15:21:08,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7bd31faaa23f4b5687943c14f005eb4e 2024-12-03T15:21:08,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/403d04b519714379a16595f634ba60a6 is 50, key is test_row_0/C:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742001_1177 (size=12151) 2024-12-03T15:21:08,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/403d04b519714379a16595f634ba60a6 2024-12-03T15:21:08,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0966d13702914586ab2a062a69601b45 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45 2024-12-03T15:21:08,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45, entries=150, sequenceid=203, filesize=30.4 K 
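The flush entries above follow the usual commit sequence: each store first writes its output under the region's .tmp directory, and the file is then moved into the A, B or C store directory and reported with its entry count, sequence id and size; the records that follow show those freshly committed files being selected almost immediately for a minor compaction. The same flush and compaction can also be requested administratively. Below is a minimal sketch against the public Admin API — the table name is the one in this log, the polling strategy and cluster configuration are assumptions, and admin.flush() only roughly corresponds to the FlushTableProcedure seen earlier in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {

            admin.flush(table);   // ask for a flush of every region of the table

            admin.compact(table); // request a (minor) compaction of the flushed store files

            // Poll until the servers report no compaction in progress for this table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(1_000);
            }
        }
    }
}
```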
2024-12-03T15:21:08,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/7bd31faaa23f4b5687943c14f005eb4e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e 2024-12-03T15:21:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e, entries=150, sequenceid=203, filesize=11.9 K 2024-12-03T15:21:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/403d04b519714379a16595f634ba60a6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6 2024-12-03T15:21:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6, entries=150, sequenceid=203, filesize=11.9 K 2024-12-03T15:21:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 217ms, sequenceid=203, compaction requested=true 2024-12-03T15:21:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:08,168 DEBUG 
[RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:08,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:08,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:08,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:08,169 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,170 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85013 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:08,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,170 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:08,170 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,171 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=83.0 K 2024-12-03T15:21:08,171 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,171 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45] 2024-12-03T15:21:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,171 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,171 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:08,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,171 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in 
TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,171 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/cf9e42e653744e9e8ac57848d1c19f4c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=33.6 K 2024-12-03T15:21:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,172 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9508a975d7c94d2ca91d61b28327bb4c, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,172 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cf9e42e653744e9e8ac57848d1c19f4c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:08,172 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0c294f50c654decb15f2b4f4edc8b1a, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239266360 2024-12-03T15:21:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,172 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d86c9118a9d44488090e0338cd07aad, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239266360 2024-12-03T15:21:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,173 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0966d13702914586ab2a062a69601b45, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:08,173 DEBUG 
[RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bd31faaa23f4b5687943c14f005eb4e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,183 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,187 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,192 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#147 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,192 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/abd2ffaa9a16486e90e5e8d02c379a5c is 50, key is test_row_0/B:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,195 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,196 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,202 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,209 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,213 DEBUG 
[RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203a3ef62c7b4d348989d7d75c6faa350e4_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,216 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203a3ef62c7b4d348989d7d75c6faa350e4_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,216 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203a3ef62c7b4d348989d7d75c6faa350e4_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742002_1178 (size=12595) 2024-12-03T15:21:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:21:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:21:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,247 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/abd2ffaa9a16486e90e5e8d02c379a5c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/abd2ffaa9a16486e90e5e8d02c379a5c 2024-12-03T15:21:08,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:21:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742003_1179 (size=4469)
2024-12-03T15:21:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,257 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#148 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-03T15:21:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/e6d788cc834c4e7abd85e3220b3ba3c0 is 175, key is test_row_0/A:col10/1733239267624/Put/seqid=0
2024-12-03T15:21:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,260 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into abd2ffaa9a16486e90e5e8d02c379a5c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T15:21:08,260 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91:
2024-12-03T15:21:08,260 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=13, startTime=1733239268168; duration=0sec
2024-12-03T15:21:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-03T15:21:08,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B
2024-12-03T15:21:08,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T15:21:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:21:08,264 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T15:21:08,264 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files)
2024-12-03T15:21:08,265 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,265 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/ddff822ea81441e5b386897734fef4d8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=33.6 K 2024-12-03T15:21:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,266 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ddff822ea81441e5b386897734fef4d8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239266038 2024-12-03T15:21:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,266 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b7686a3051c64ea48ebbcf8870661af4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239266360 2024-12-03T15:21:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,267 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 403d04b519714379a16595f634ba60a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,293 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#149 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,294 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b78eccadf6f144be87cb7060fbf21f1c is 50, key is test_row_0/C:col10/1733239267624/Put/seqid=0 2024-12-03T15:21:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:08,303 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:08,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742004_1180 (size=31549) 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:08,303 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,315 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/e6d788cc834c4e7abd85e3220b3ba3c0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0 2024-12-03T15:21:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,324 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into e6d788cc834c4e7abd85e3220b3ba3c0(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:08,324 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:08,324 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=13, startTime=1733239268168; duration=0sec 2024-12-03T15:21:08,324 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:08,324 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203bd9a5aaac32b4081869b4b8620433913_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239268302/Put/seqid=0 2024-12-03T15:21:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,330 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742005_1181 (size=12595) 2024-12-03T15:21:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,342 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b78eccadf6f144be87cb7060fbf21f1c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b78eccadf6f144be87cb7060fbf21f1c 2024-12-03T15:21:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,349 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into b78eccadf6f144be87cb7060fbf21f1c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:08,349 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:08,349 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=13, startTime=1733239268169; duration=0sec 2024-12-03T15:21:08,349 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:08,349 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:08,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742007_1183 (size=24758) 2024-12-03T15:21:08,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239328358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239328363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239328366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239328367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239328369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,371 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:08,377 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203bd9a5aaac32b4081869b4b8620433913_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bd9a5aaac32b4081869b4b8620433913_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:08,379 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/78638d74317849b5a18bd63ed628da5b, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:08,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/78638d74317849b5a18bd63ed628da5b is 175, key is test_row_0/A:col10/1733239268302/Put/seqid=0 2024-12-03T15:21:08,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742006_1182 (size=74395) 2024-12-03T15:21:08,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239328472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239328472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239328473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239328473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239328478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-03T15:21:08,672 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-03T15:21:08,675 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:08,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-03T15:21:08,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239328676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239328678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,679 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:08,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:08,680 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:08,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:08,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239328680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239328680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239328683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:08,793 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/78638d74317849b5a18bd63ed628da5b 2024-12-03T15:21:08,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/5eaeafca6eb444c292cce9fe421ab833 is 50, key is test_row_0/B:col10/1733239268302/Put/seqid=0 2024-12-03T15:21:08,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-03T15:21:08,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:08,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:08,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:08,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:08,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742008_1184 (size=12151) 2024-12-03T15:21:08,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:08,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239328981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239328981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239328982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239328984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:08,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239328987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,989 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:08,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-03T15:21:08,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:08,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:08,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:08,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:08,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:09,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-03T15:21:09,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:09,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:09,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:09,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:09,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:09,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/5eaeafca6eb444c292cce9fe421ab833 2024-12-03T15:21:09,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f82e252a3e46e99d0b153029c9dd6c is 50, key is test_row_0/C:col10/1733239268302/Put/seqid=0 2024-12-03T15:21:09,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742009_1185 (size=12151) 2024-12-03T15:21:09,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f82e252a3e46e99d0b153029c9dd6c 2024-12-03T15:21:09,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/78638d74317849b5a18bd63ed628da5b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b 2024-12-03T15:21:09,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b, entries=400, sequenceid=215, filesize=72.7 K 2024-12-03T15:21:09,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/5eaeafca6eb444c292cce9fe421ab833 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833 2024-12-03T15:21:09,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:09,287 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833, entries=150, sequenceid=215, filesize=11.9 K 2024-12-03T15:21:09,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/90f82e252a3e46e99d0b153029c9dd6c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c 2024-12-03T15:21:09,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c, entries=150, sequenceid=215, filesize=11.9 K 2024-12-03T15:21:09,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 992ms, sequenceid=215, compaction requested=false 2024-12-03T15:21:09,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:09,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-03T15:21:09,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:09,297 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:09,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:09,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203ae48f887a661453e81789b4961622898_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239268366/Put/seqid=0 2024-12-03T15:21:09,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742010_1186 (size=12304) 2024-12-03T15:21:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:09,360 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203ae48f887a661453e81789b4961622898_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ae48f887a661453e81789b4961622898_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:09,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/d9845c359a6b44ba9853776cd51bfbd0, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:09,365 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/d9845c359a6b44ba9853776cd51bfbd0 is 175, key is test_row_0/A:col10/1733239268366/Put/seqid=0 2024-12-03T15:21:09,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742011_1187 (size=31105) 2024-12-03T15:21:09,422 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/d9845c359a6b44ba9853776cd51bfbd0 2024-12-03T15:21:09,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c8a37f1eaa3648aab4f1f708ff626b12 is 50, key is test_row_0/B:col10/1733239268366/Put/seqid=0 2024-12-03T15:21:09,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:09,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742012_1188 (size=12151) 2024-12-03T15:21:09,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239329497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239329499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239329518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239329518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239329520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239329621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239329623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239329624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239329628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:09,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239329825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239329825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239329826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239329835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:09,890 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c8a37f1eaa3648aab4f1f708ff626b12 2024-12-03T15:21:09,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b8912665372743fe917dd48bfa13d990 is 50, key is test_row_0/C:col10/1733239268366/Put/seqid=0 2024-12-03T15:21:09,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742013_1189 (size=12151) 2024-12-03T15:21:09,955 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b8912665372743fe917dd48bfa13d990 2024-12-03T15:21:09,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/d9845c359a6b44ba9853776cd51bfbd0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0 2024-12-03T15:21:09,969 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0, entries=150, sequenceid=243, filesize=30.4 K 2024-12-03T15:21:09,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c8a37f1eaa3648aab4f1f708ff626b12 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12 2024-12-03T15:21:10,003 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12, entries=150, sequenceid=243, filesize=11.9 K 2024-12-03T15:21:10,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b8912665372743fe917dd48bfa13d990 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990 2024-12-03T15:21:10,011 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990, entries=150, sequenceid=243, filesize=11.9 K 2024-12-03T15:21:10,013 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 716ms, sequenceid=243, compaction requested=true 2024-12-03T15:21:10,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:10,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:10,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-03T15:21:10,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-03T15:21:10,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-03T15:21:10,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3350 sec 2024-12-03T15:21:10,024 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.3420 sec 2024-12-03T15:21:10,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:10,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:21:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:10,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:10,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030d93ea6459794627b4b3e90976dd0136_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742014_1190 (size=12304) 2024-12-03T15:21:10,206 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:10,213 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030d93ea6459794627b4b3e90976dd0136_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030d93ea6459794627b4b3e90976dd0136_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:10,213 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239330207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239330207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239330210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239330211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,217 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/bd28e0ee9cc94d1296d385ce6523b4d2, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:10,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/bd28e0ee9cc94d1296d385ce6523b4d2 is 175, key is test_row_0/A:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742015_1191 (size=31105) 2024-12-03T15:21:10,250 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/bd28e0ee9cc94d1296d385ce6523b4d2 2024-12-03T15:21:10,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/40525988ade04520af123145262cce43 is 50, key is test_row_0/B:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742016_1192 (size=12151) 2024-12-03T15:21:10,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/40525988ade04520af123145262cce43 2024-12-03T15:21:10,305 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b1239b681b754788a63e4363ce121728 is 50, key is test_row_0/C:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239330315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742017_1193 (size=12151) 2024-12-03T15:21:10,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239330316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239330316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239330317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239330517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239330522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239330523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239330524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239330526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b1239b681b754788a63e4363ce121728 2024-12-03T15:21:10,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/bd28e0ee9cc94d1296d385ce6523b4d2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2 2024-12-03T15:21:10,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2, entries=150, sequenceid=256, filesize=30.4 K 2024-12-03T15:21:10,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/40525988ade04520af123145262cce43 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43 2024-12-03T15:21:10,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43, entries=150, sequenceid=256, filesize=11.9 K 2024-12-03T15:21:10,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/b1239b681b754788a63e4363ce121728 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728 2024-12-03T15:21:10,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728, entries=150, sequenceid=256, filesize=11.9 K 2024-12-03T15:21:10,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 618ms, sequenceid=256, compaction requested=true 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:10,758 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:10,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:10,760 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 168154 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:10,760 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:10,760 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:10,760 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=164.2 K 2024-12-03T15:21:10,761 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:10,761 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2] 2024-12-03T15:21:10,761 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6d788cc834c4e7abd85e3220b3ba3c0, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:10,761 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:10,762 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78638d74317849b5a18bd63ed628da5b, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733239267953 2024-12-03T15:21:10,765 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9845c359a6b44ba9853776cd51bfbd0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733239268354 2024-12-03T15:21:10,766 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd28e0ee9cc94d1296d385ce6523b4d2, 
keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:10,767 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:10,768 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:10,768 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:10,768 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/abd2ffaa9a16486e90e5e8d02c379a5c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=47.9 K 2024-12-03T15:21:10,768 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting abd2ffaa9a16486e90e5e8d02c379a5c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:10,769 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eaeafca6eb444c292cce9fe421ab833, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733239268291 2024-12-03T15:21:10,770 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c8a37f1eaa3648aab4f1f708ff626b12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733239268354 2024-12-03T15:21:10,770 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 40525988ade04520af123145262cce43, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:10,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-03T15:21:10,786 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-03T15:21:10,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:10,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-03T15:21:10,805 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:10,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:10,806 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:10,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:10,812 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:10,813 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/750d4597e5bd4fbeab232d69bbfc824f is 50, key is test_row_0/B:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,815 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:10,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:21:10,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:10,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:10,842 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412039558f1132bf84c3a894acac3234baf7d_9cb1ca4e5b5289fcc2a0bafc5801cb91 
store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:10,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239330843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,845 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412039558f1132bf84c3a894acac3234baf7d_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:10,845 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412039558f1132bf84c3a894acac3234baf7d_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:10,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239330845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239330846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239330846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742018_1194 (size=12731) 2024-12-03T15:21:10,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038e6ada6154f041e9ae93e583d0fccca6_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239270199/Put/seqid=0 2024-12-03T15:21:10,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742019_1195 (size=4469) 2024-12-03T15:21:10,875 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/750d4597e5bd4fbeab232d69bbfc824f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/750d4597e5bd4fbeab232d69bbfc824f 2024-12-03T15:21:10,878 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#160 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:10,878 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0097baedd2a04817b4713a4fe68eda54 is 175, key is test_row_0/A:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742020_1196 (size=12454) 2024-12-03T15:21:10,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742021_1197 (size=31685) 2024-12-03T15:21:10,891 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 750d4597e5bd4fbeab232d69bbfc824f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:10,891 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:10,891 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=12, startTime=1733239270758; duration=0sec 2024-12-03T15:21:10,892 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:10,892 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:10,892 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:10,894 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:10,894 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:10,894 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:10,894 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b78eccadf6f144be87cb7060fbf21f1c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=47.9 K 2024-12-03T15:21:10,896 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b78eccadf6f144be87cb7060fbf21f1c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1733239267624 2024-12-03T15:21:10,897 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 90f82e252a3e46e99d0b153029c9dd6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733239268291 2024-12-03T15:21:10,897 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b8912665372743fe917dd48bfa13d990, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733239268354 2024-12-03T15:21:10,906 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b1239b681b754788a63e4363ce121728, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:10,906 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/0097baedd2a04817b4713a4fe68eda54 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54 2024-12-03T15:21:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:10,928 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 0097baedd2a04817b4713a4fe68eda54(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:10,928 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:10,928 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=12, startTime=1733239270758; duration=0sec 2024-12-03T15:21:10,928 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:10,928 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:10,940 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:10,941 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/25a34a67abb44411b0e039be4e7add56 is 50, key is test_row_0/C:col10/1733239270134/Put/seqid=0 2024-12-03T15:21:10,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239330949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239330949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239330950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:10,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239330950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:10,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:10,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:10,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:10,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:10,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:10,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:10,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:10,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742022_1198 (size=12731) 2024-12-03T15:21:11,057 INFO [master/2b5ef621a0dd:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-03T15:21:11,057 INFO [master/2b5ef621a0dd:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-03T15:21:11,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:11,112 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:11,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239331152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239331152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239331154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239331154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,266 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,292 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:11,296 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038e6ada6154f041e9ae93e583d0fccca6_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038e6ada6154f041e9ae93e583d0fccca6_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:11,297 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/59bff90381ec4ed1b07d8ebb4bcf159c, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:11,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/59bff90381ec4ed1b07d8ebb4bcf159c is 175, key is test_row_0/A:col10/1733239270199/Put/seqid=0 2024-12-03T15:21:11,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742023_1199 (size=31255) 2024-12-03T15:21:11,379 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/25a34a67abb44411b0e039be4e7add56 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/25a34a67abb44411b0e039be4e7add56 2024-12-03T15:21:11,385 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 25a34a67abb44411b0e039be4e7add56(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:11,385 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:11,385 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=12, startTime=1733239270758; duration=0sec 2024-12-03T15:21:11,385 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:11,385 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:11,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,419 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:11,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239331455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239331456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239331458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239331459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,572 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,572 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,573 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,702 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=281, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/59bff90381ec4ed1b07d8ebb4bcf159c 2024-12-03T15:21:11,710 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4bd7f32ce2fe40c094ec766c54772fd8 is 50, key is test_row_0/B:col10/1733239270199/Put/seqid=0 2024-12-03T15:21:11,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742024_1200 (size=12301) 2024-12-03T15:21:11,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,726 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:11,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:11,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:11,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:11,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:11,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239331958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239331960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239331960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:11,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:11,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239331963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:12,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:12,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:12,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:12,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:12,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:12,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:12,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4bd7f32ce2fe40c094ec766c54772fd8 2024-12-03T15:21:12,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4da897ad0167477b915a35820f3f911d is 50, key is test_row_0/C:col10/1733239270199/Put/seqid=0 2024-12-03T15:21:12,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742025_1201 (size=12301) 2024-12-03T15:21:12,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4da897ad0167477b915a35820f3f911d 2024-12-03T15:21:12,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/59bff90381ec4ed1b07d8ebb4bcf159c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c 2024-12-03T15:21:12,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c, entries=150, sequenceid=281, filesize=30.5 K 2024-12-03T15:21:12,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4bd7f32ce2fe40c094ec766c54772fd8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8 2024-12-03T15:21:12,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8, entries=150, sequenceid=281, filesize=12.0 K 2024-12-03T15:21:12,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4da897ad0167477b915a35820f3f911d as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d 2024-12-03T15:21:12,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d, entries=150, sequenceid=281, filesize=12.0 K 2024-12-03T15:21:12,155 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1326ms, sequenceid=281, compaction requested=false 2024-12-03T15:21:12,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:12,186 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-03T15:21:12,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:12,187 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:21:12,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:12,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:12,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120397951e7de8b442d4b8a6e6b08804c2c8_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239270843/Put/seqid=0 2024-12-03T15:21:12,215 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742026_1202 (size=12454) 2024-12-03T15:21:12,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:12,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:12,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239332599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:12,621 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120397951e7de8b442d4b8a6e6b08804c2c8_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397951e7de8b442d4b8a6e6b08804c2c8_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:12,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/1692b82813a64d068bcac28e9880565b, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:12,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/1692b82813a64d068bcac28e9880565b is 175, key is test_row_0/A:col10/1733239270843/Put/seqid=0 2024-12-03T15:21:12,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742027_1203 (size=31255) 2024-12-03T15:21:12,628 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/1692b82813a64d068bcac28e9880565b 2024-12-03T15:21:12,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4be6e188602e4ce38bd0442414a8e5f4 is 50, key is test_row_0/B:col10/1733239270843/Put/seqid=0 2024-12-03T15:21:12,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742028_1204 (size=12301) 2024-12-03T15:21:12,642 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4be6e188602e4ce38bd0442414a8e5f4 2024-12-03T15:21:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/e9b0c553b67a4591bc4f4d45c65a2831 is 50, key is test_row_0/C:col10/1733239270843/Put/seqid=0 2024-12-03T15:21:12,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742029_1205 (size=12301) 2024-12-03T15:21:12,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239332702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239332908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:12,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239332964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239332967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239332969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:12,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239332973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:13,070 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/e9b0c553b67a4591bc4f4d45c65a2831 2024-12-03T15:21:13,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/1692b82813a64d068bcac28e9880565b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b 2024-12-03T15:21:13,080 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b, entries=150, sequenceid=295, filesize=30.5 K 2024-12-03T15:21:13,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/4be6e188602e4ce38bd0442414a8e5f4 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4 2024-12-03T15:21:13,098 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4, entries=150, sequenceid=295, filesize=12.0 K 2024-12-03T15:21:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/e9b0c553b67a4591bc4f4d45c65a2831 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831 2024-12-03T15:21:13,104 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831, entries=150, sequenceid=295, filesize=12.0 K 2024-12-03T15:21:13,105 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 918ms, sequenceid=295, compaction requested=true 2024-12-03T15:21:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
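For orientation, the pid=50 entries here are a FlushRegionCallable/FlushRegionProcedure running under the table-level procedure pid=49 that the master finishes just below. A table flush of this kind can be requested from a client roughly as follows (standard HBase 2.x Admin API; whether this particular test drives it exactly this way is not visible in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Asks the master to flush all regions of the table; server-side this
                // shows up as a table flush procedure with one per-region subprocedure,
                // matching the pid=49 / pid=50 entries in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
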
2024-12-03T15:21:13,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-03T15:21:13,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-03T15:21:13,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-03T15:21:13,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3000 sec 2024-12-03T15:21:13,108 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.3200 sec 2024-12-03T15:21:13,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:13,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:13,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:13,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412037d752ed5f5d34f86b0bba64fdd597924_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:13,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742030_1206 (size=14994) 2024-12-03T15:21:13,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239333238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:13,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:13,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239333342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:13,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239333545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:13,634 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:13,639 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412037d752ed5f5d34f86b0bba64fdd597924_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037d752ed5f5d34f86b0bba64fdd597924_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:13,640 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/70ee567867e34a19bb19817b15e2e604, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:13,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/70ee567867e34a19bb19817b15e2e604 is 175, key is test_row_0/A:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:13,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742031_1207 (size=39949) 2024-12-03T15:21:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239333850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:14,046 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=321, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/70ee567867e34a19bb19817b15e2e604 2024-12-03T15:21:14,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/869427990f7a437183a58961412571d7 is 50, key is test_row_0/B:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:14,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742032_1208 (size=12301) 2024-12-03T15:21:14,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/869427990f7a437183a58961412571d7 2024-12-03T15:21:14,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/de2756d7490947708f0ee61c370c2654 is 50, key is test_row_0/C:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742033_1209 (size=12301) 2024-12-03T15:21:14,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:14,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239334357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:14,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/de2756d7490947708f0ee61c370c2654 2024-12-03T15:21:14,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/70ee567867e34a19bb19817b15e2e604 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604 2024-12-03T15:21:14,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604, entries=200, sequenceid=321, filesize=39.0 K 2024-12-03T15:21:14,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/869427990f7a437183a58961412571d7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7 2024-12-03T15:21:14,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7, entries=150, sequenceid=321, filesize=12.0 K 2024-12-03T15:21:14,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/de2756d7490947708f0ee61c370c2654 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654 2024-12-03T15:21:14,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654, entries=150, sequenceid=321, filesize=12.0 K 2024-12-03T15:21:14,548 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1328ms, sequenceid=321, compaction requested=true 2024-12-03T15:21:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:14,548 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:14,548 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:14,548 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:14,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:14,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:14,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:14,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:14,554 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134144 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:14,554 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor 
compaction (all files) 2024-12-03T15:21:14,554 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:14,554 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:14,554 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=131 K 2024-12-03T15:21:14,554 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:14,554 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:14,554 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604] 2024-12-03T15:21:14,555 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
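The SortedCompactionPolicy/ExploringCompactionPolicy lines above show each store picking all 4 eligible files for a minor compaction ("4 eligible, 16 blocking"). A minimal sketch of the knobs behind that decision (standard HBase configuration keys; the fallback values shown are the usual defaults, not settings read from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum/maximum store files considered for one minor compaction, and the
            // per-store file count at which new flushes start blocking writes.
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
            // With 4 freshly flushed files per family, 4 >= minFiles, so the exploring
            // policy selects all of them, as the log above reports.
            System.out.println("min=" + minFiles + " max=" + maxFiles + " blocking=" + blockingFiles);
        }
    }
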
2024-12-03T15:21:14,555 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/750d4597e5bd4fbeab232d69bbfc824f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=48.5 K 2024-12-03T15:21:14,556 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0097baedd2a04817b4713a4fe68eda54, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:14,556 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 750d4597e5bd4fbeab232d69bbfc824f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:14,556 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59bff90381ec4ed1b07d8ebb4bcf159c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239270199 2024-12-03T15:21:14,556 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4bd7f32ce2fe40c094ec766c54772fd8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239270199 2024-12-03T15:21:14,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4be6e188602e4ce38bd0442414a8e5f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733239270833 2024-12-03T15:21:14,557 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1692b82813a64d068bcac28e9880565b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733239270833 2024-12-03T15:21:14,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 869427990f7a437183a58961412571d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:14,557 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70ee567867e34a19bb19817b15e2e604, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:14,584 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:14,584 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:14,584 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/22dd8c86bfa8469b9768e2ef37174a56 is 50, key is test_row_0/B:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:14,602 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120399ea519ebeb843fbaf74db366c90f957_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:14,606 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120399ea519ebeb843fbaf74db366c90f957_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:14,606 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120399ea519ebeb843fbaf74db366c90f957_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:14,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742034_1210 (size=13017) 2024-12-03T15:21:14,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742035_1211 (size=4469) 2024-12-03T15:21:14,631 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#172 average throughput is 0.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:14,632 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b is 175, key is test_row_0/A:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:14,635 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/22dd8c86bfa8469b9768e2ef37174a56 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/22dd8c86bfa8469b9768e2ef37174a56 2024-12-03T15:21:14,650 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 22dd8c86bfa8469b9768e2ef37174a56(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:14,650 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:14,650 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=12, startTime=1733239274548; duration=0sec 2024-12-03T15:21:14,650 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:14,650 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:14,651 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:14,652 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:14,652 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:14,652 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
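[Editor's note] The lines above show the ExploringCompactionPolicy picking four store files per family and the PressureAwareThroughputController reporting per-compaction throughput against a 50.00 MB/second limit. As a hedged illustration only, the sketch below shows how compactions like these can be requested through the public HBase Admin API; the table name comes from the log, while the connection setup and the choice of minor versus major compaction are assumptions, not taken from the test harness.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionTrigger {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Minor compaction request; the region server's compaction policy
      // (ExploringCompactionPolicy in the log above) decides which files to merge.
      admin.compact(table);
      // A major compaction rewrites all store files for each store, which is
      // what the "(all) file(s)" completion lines above correspond to.
      admin.majorCompact(table);
    }
  }
}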
2024-12-03T15:21:14,652 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/25a34a67abb44411b0e039be4e7add56, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=48.5 K 2024-12-03T15:21:14,653 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 25a34a67abb44411b0e039be4e7add56, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1733239269497 2024-12-03T15:21:14,655 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4da897ad0167477b915a35820f3f911d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239270199 2024-12-03T15:21:14,656 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e9b0c553b67a4591bc4f4d45c65a2831, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733239270833 2024-12-03T15:21:14,657 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting de2756d7490947708f0ee61c370c2654, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:14,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742036_1212 (size=31971) 2024-12-03T15:21:14,679 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b 2024-12-03T15:21:14,688 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#173 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:14,688 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into c7d1d69d6a4d4d2f88a4600eaa23ef4b(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
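[Editor's note] Family A above is compacted through mob.DefaultMobStoreCompactor, which creates and then aborts a MOB writer because no MOB cells survive. A minimal sketch of how a MOB-enabled family is typically declared follows; the MOB threshold value and the table-creation flow are assumptions for illustration, not the test's actual setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySetup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Values larger than the threshold go to separate MOB files, which is why
      // compactions of family A run through DefaultMobStoreCompactor above.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)   // illustrative threshold in bytes (assumption)
          .build());
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}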
2024-12-03T15:21:14,688 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:14,688 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=12, startTime=1733239274548; duration=0sec 2024-12-03T15:21:14,688 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:14,688 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:14,688 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4ebe1bf939aa43b6aa683ecaf0434bc3 is 50, key is test_row_0/C:col10/1733239272581/Put/seqid=0 2024-12-03T15:21:14,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742037_1213 (size=13017) 2024-12-03T15:21:14,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-03T15:21:14,916 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-03T15:21:14,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:14,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-03T15:21:14,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:14,924 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:14,927 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:14,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:14,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:21:14,978 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:14,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:14,978 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:14,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:14,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:14,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:14,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412035862bbb09e034aa799d25c1cdf887f38_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239273224/Put/seqid=0 2024-12-03T15:21:15,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742038_1214 (size=14994) 2024-12-03T15:21:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:15,082 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:15,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
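[Editor's note] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") mean the region is rejecting puts while its memstore flush drains. The sketch below is one way a writer might back off and retry; the retry budget and sleep schedule are assumptions, and in practice the HBase client also retries such failures internally (governed by hbase.client.retries.number and hbase.client.pause), possibly surfacing them wrapped in a retries-exhausted exception.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                       // the client retries internally as well
          return;
        } catch (IOException e) {
          boolean tooBusy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!tooBusy || attempt >= 5) {       // assumed retry budget
            throw e;
          }
          // Back off so the in-flight memstore flush can drop the region below
          // its blocking limit before the next attempt.
          Thread.sleep(200L * attempt);         // assumed backoff schedule
        }
      }
    }
  }
}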
2024-12-03T15:21:15,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,116 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4ebe1bf939aa43b6aa683ecaf0434bc3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4ebe1bf939aa43b6aa683ecaf0434bc3 2024-12-03T15:21:15,123 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 4ebe1bf939aa43b6aa683ecaf0434bc3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:15,123 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:15,123 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=12, startTime=1733239274549; duration=0sec 2024-12-03T15:21:15,123 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:15,123 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:15,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:15,235 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:15,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
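[Editor's note] Procedure pid=51 above is a master-side FlushTableProcedure; its remote FlushRegionCallable (pid=52) keeps failing with "NOT flushing ... as already flushing" because the memstore-pressure flush is still in progress, so the master re-dispatches it. The sketch below shows the client-side call that starts such a procedure; only the table name is taken from the log, the rest is an illustrative assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush procedure completes; while the region is already
      // flushing, the remote callable fails and the procedure is retried, which
      // is what the repeated pid=52 error traces above record.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}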
2024-12-03T15:21:15,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239335360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:15,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:15,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,407 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:15,412 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412035862bbb09e034aa799d25c1cdf887f38_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035862bbb09e034aa799d25c1cdf887f38_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:15,413 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3632e46c3d7d4612a77459cedef72e01, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:15,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3632e46c3d7d4612a77459cedef72e01 is 175, key is test_row_0/A:col10/1733239273224/Put/seqid=0 2024-12-03T15:21:15,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742039_1215 (size=39949) 2024-12-03T15:21:15,438 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3632e46c3d7d4612a77459cedef72e01 2024-12-03T15:21:15,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/378cfe87311e497f9681be50d48e536d is 50, key is test_row_0/B:col10/1733239273224/Put/seqid=0 2024-12-03T15:21:15,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742040_1216 (size=12301) 2024-12-03T15:21:15,483 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/378cfe87311e497f9681be50d48e536d 2024-12-03T15:21:15,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/44fef929c17541029ba0e8f81e866afc is 50, key is test_row_0/C:col10/1733239273224/Put/seqid=0 
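[Editor's note] The flush above has now written store files for family A (via the MOB flush path), B and C at sequenceid=334. A hedged sketch of the kind of row-consistency read that gives TestAcidGuarantees its name follows: a single-row Get should see the same write across all three families. The family and qualifier names come from the log keys; the equality assertion itself is an illustration, not the test's exact code.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      // Row-level atomicity: a writer updates A, B and C together, so a
      // single-row read should never observe mixed versions across families.
      if (!Arrays.equals(a, b) || !Arrays.equals(b, c)) {
        throw new AssertionError("torn read on test_row_0");
      }
    }
  }
}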
2024-12-03T15:21:15,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742041_1217 (size=12301) 2024-12-03T15:21:15,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/44fef929c17541029ba0e8f81e866afc 2024-12-03T15:21:15,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/3632e46c3d7d4612a77459cedef72e01 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01 2024-12-03T15:21:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:15,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01, entries=200, sequenceid=334, filesize=39.0 K 2024-12-03T15:21:15,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/378cfe87311e497f9681be50d48e536d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d 2024-12-03T15:21:15,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d, entries=150, sequenceid=334, filesize=12.0 K 2024-12-03T15:21:15,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/44fef929c17541029ba0e8f81e866afc as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc 2024-12-03T15:21:15,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:15,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:15,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:15,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:15,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc, entries=150, sequenceid=334, filesize=12.0 K 2024-12-03T15:21:15,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 572ms, sequenceid=334, compaction requested=false 2024-12-03T15:21:15,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:15,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:15,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:15,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:15,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:15,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:15,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:15,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:15,626 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:15,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033271e4293d844b6c943abd9fe05f3c90_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:15,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742042_1218 (size=12454) 2024-12-03T15:21:15,686 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:15,692 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033271e4293d844b6c943abd9fe05f3c90_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033271e4293d844b6c943abd9fe05f3c90_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:15,694 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/686b1a7f65aa44bb927f09bb88b0b8e6, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:15,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/686b1a7f65aa44bb927f09bb88b0b8e6 is 175, key is test_row_0/A:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:15,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
as already flushing 2024-12-03T15:21:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742043_1219 (size=31255) 2024-12-03T15:21:15,725 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=362, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/686b1a7f65aa44bb927f09bb88b0b8e6 2024-12-03T15:21:15,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/425188504d44442da766b298402f267b is 50, key is test_row_0/B:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:15,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742044_1220 (size=12301) 2024-12-03T15:21:15,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:15,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:15,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:15,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239335978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239335978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239335979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:15,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:15,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239335981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,003 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:16,158 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:16,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/425188504d44442da766b298402f267b 2024-12-03T15:21:16,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/0a26affed68940a58c4e98ce96f49aaf is 50, key is test_row_0/C:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742045_1221 (size=12301) 2024-12-03T15:21:16,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239336284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239336285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239336286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239336287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,474 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/0a26affed68940a58c4e98ce96f49aaf 2024-12-03T15:21:16,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/686b1a7f65aa44bb927f09bb88b0b8e6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6 2024-12-03T15:21:16,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6, entries=150, sequenceid=362, filesize=30.5 K 2024-12-03T15:21:16,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/425188504d44442da766b298402f267b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b 2024-12-03T15:21:16,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b, entries=150, 
sequenceid=362, filesize=12.0 K 2024-12-03T15:21:16,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/0a26affed68940a58c4e98ce96f49aaf as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf 2024-12-03T15:21:16,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf, entries=150, sequenceid=362, filesize=12.0 K 2024-12-03T15:21:16,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1023ms, sequenceid=362, compaction requested=true 2024-12-03T15:21:16,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:16,647 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:16,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,649 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:16,649 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:16,649 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,649 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=100.8 K 2024-12-03T15:21:16,649 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,649 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6] 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:16,678 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:16,678 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:16,680 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7d1d69d6a4d4d2f88a4600eaa23ef4b, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:16,681 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:16,681 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:16,681 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:16,681 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/22dd8c86bfa8469b9768e2ef37174a56, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=36.7 K 2024-12-03T15:21:16,681 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 22dd8c86bfa8469b9768e2ef37174a56, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:16,682 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 378cfe87311e497f9681be50d48e536d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733239273224 2024-12-03T15:21:16,682 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 425188504d44442da766b298402f267b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:16,683 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3632e46c3d7d4612a77459cedef72e01, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733239273224 2024-12-03T15:21:16,683 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 686b1a7f65aa44bb927f09bb88b0b8e6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:16,706 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:16,706 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/2f4148f6d4e24105b4e2fac264d467ec is 50, key is test_row_0/B:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:16,708 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:16,742 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120321ea8a8275e64d57a27b67022fe1ec05_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:16,744 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120321ea8a8275e64d57a27b67022fe1ec05_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:16,744 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120321ea8a8275e64d57a27b67022fe1ec05_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:16,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742046_1222 (size=13119) 2024-12-03T15:21:16,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742047_1223 (size=4469) 2024-12-03T15:21:16,783 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#181 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:16,784 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7c81f7ecac5b4dc6a057c99c530c95a9 is 175, key is test_row_0/A:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:16,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:16,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:16,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:16,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:16,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742048_1224 (size=32073) 2024-12-03T15:21:16,826 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7c81f7ecac5b4dc6a057c99c530c95a9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9 2024-12-03T15:21:16,833 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 7c81f7ecac5b4dc6a057c99c530c95a9(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:16,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:16,833 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=13, startTime=1733239276647; duration=0sec 2024-12-03T15:21:16,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:16,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:16,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:16,835 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:16,835 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:16,835 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,835 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4ebe1bf939aa43b6aa683ecaf0434bc3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=36.7 K 2024-12-03T15:21:16,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038995a43f133840f3a3d886345a9d0e4c_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239275670/Put/seqid=0 2024-12-03T15:21:16,837 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ebe1bf939aa43b6aa683ecaf0434bc3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1733239272575 2024-12-03T15:21:16,837 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44fef929c17541029ba0e8f81e866afc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733239273224 
2024-12-03T15:21:16,838 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a26affed68940a58c4e98ce96f49aaf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:16,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239336848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239336851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239336854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239336854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,868 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:16,869 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/766af67ccc7745e6a776174fa7c2c61f is 50, key is test_row_0/C:col10/1733239275623/Put/seqid=0 2024-12-03T15:21:16,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742049_1225 (size=12454) 2024-12-03T15:21:16,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742050_1226 (size=13119) 2024-12-03T15:21:16,953 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:16,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
as already flushing 2024-12-03T15:21:16,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:16,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:16,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239336956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239336963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239336963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:16,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:16,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239336963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-03T15:21:17,111 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:17,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
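The repeated RegionTooBusyException rejections above come from HRegion.checkResources: once a region's memstore exceeds its blocking limit (the flush size multiplied by the block multiplier), puts fail fast until a flush brings the memstore back under the limit. A minimal sketch of the two properties involved; the values below are illustrative ones that reproduce a 512 K blocking limit, not necessarily the ones this test run configured:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Blocking limit = flush.size * block.multiplier. While the region's memstore
        // is above that product, writes are rejected with RegionTooBusyException.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L); // illustrative: 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // illustrative: 4x
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " K");
    }
}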
2024-12-03T15:21:17,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239337164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239337168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239337169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239337169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,175 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/2f4148f6d4e24105b4e2fac264d467ec as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2f4148f6d4e24105b4e2fac264d467ec 2024-12-03T15:21:17,184 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 2f4148f6d4e24105b4e2fac264d467ec(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:17,184 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:17,184 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=13, startTime=1733239276678; duration=0sec 2024-12-03T15:21:17,185 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:17,185 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:17,266 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:17,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,267 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:17,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,290 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:17,296 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038995a43f133840f3a3d886345a9d0e4c_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038995a43f133840f3a3d886345a9d0e4c_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:17,297 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0664cfd1ed94b6d9f90dad5053eb1b8, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:17,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0664cfd1ed94b6d9f90dad5053eb1b8 is 175, key is test_row_0/A:col10/1733239275670/Put/seqid=0 2024-12-03T15:21:17,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742051_1227 (size=31255) 2024-12-03T15:21:17,316 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=373, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0664cfd1ed94b6d9f90dad5053eb1b8 2024-12-03T15:21:17,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c81495f50a7243b7abc96237d03269c4 is 50, key is test_row_0/B:col10/1733239275670/Put/seqid=0 2024-12-03T15:21:17,338 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/766af67ccc7745e6a776174fa7c2c61f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/766af67ccc7745e6a776174fa7c2c61f 2024-12-03T15:21:17,345 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 766af67ccc7745e6a776174fa7c2c61f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
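The HMobStore and DefaultMobStoreFlusher lines above are the MOB flush path: column family A is MOB-enabled in this test, so flushed mob data is written to a temporary mob file and then renamed into the mob directory alongside the regular store file. A minimal sketch, assuming a table is being created from scratch with a hypothetical open Admin handle (the MOB threshold value is illustrative), of declaring such a family:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
    // Assumes 'admin' is an open Admin handle (hypothetical).
    static void createMobTable(Admin admin) throws Exception {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)    // cells above the threshold go to the MOB store
                .setMobThreshold(100L)  // illustrative threshold, in bytes
                .build())
            .build());
    }
}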
2024-12-03T15:21:17,345 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:17,345 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=13, startTime=1733239276678; duration=0sec 2024-12-03T15:21:17,345 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:17,345 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:17,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742052_1228 (size=12301) 2024-12-03T15:21:17,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c81495f50a7243b7abc96237d03269c4 2024-12-03T15:21:17,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/cdcdd4b7ba374886ab27bb0671670a78 is 50, key is test_row_0/C:col10/1733239275670/Put/seqid=0 2024-12-03T15:21:17,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742053_1229 (size=12301) 2024-12-03T15:21:17,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57170 deadline: 1733239337378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,379 DEBUG [Thread-630 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:17,421 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:17,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
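The RpcRetryingCallerImpl entry above (tries=6, retries=16, started=4141 ms ago) is the test's writer thread transparently retrying its put while the region stays over the memstore limit; the exception only surfaces to the caller once the retry budget is exhausted. A minimal sketch of the same client-side pattern, assuming default connection settings; the retry count and pause below are illustrative (the count happens to match the retries=16 shown above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // illustrative retry budget
        conf.setLong("hbase.client.pause", 100L);       // illustrative base backoff, in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // RegionTooBusyException from the server is retried internally with backoff;
            // the put call returns only after it succeeds or the retries run out.
            table.put(put);
        }
    }
}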
2024-12-03T15:21:17,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:17,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239337467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239337469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239337471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239337472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:17,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
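The RegionTooBusyException warnings repeated above all come from HRegion.checkResources, which starts rejecting mutations once the region's memstore reaches its blocking size, reported here as 512.0 K. That blocking size is the configured memstore flush size multiplied by the block multiplier, so a very small flush size in the test configuration produces the 512 KB ceiling. A minimal sketch of one combination that yields exactly that limit; the property names are standard HBase keys, but the specific values this test uses are an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the stock default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
    // Block new updates once the memstore passes flush.size * multiplier:
    // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit in bytes = " + blockingLimit); // 524288
  }
}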
2024-12-03T15:21:17,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,730 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-03T15:21:17,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:17,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
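Each pid=52 round above is the master's RSProcedureDispatcher re-sending the flush callable to the region server, which answers "Unable to complete flush ... as already flushing" until the in-flight flush drains, so the master records the remote procedure as failed and schedules another attempt. A table flush of this kind can be requested from a client with Admin.flush; the sketch below is a minimal illustration and not necessarily how this test triggers its flushes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master turns this into
      // per-region flush work and, as the log shows for pid=52, keeps re-dispatching
      // the callable while the region reports that it is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}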
2024-12-03T15:21:17,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/cdcdd4b7ba374886ab27bb0671670a78 2024-12-03T15:21:17,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/b0664cfd1ed94b6d9f90dad5053eb1b8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8 2024-12-03T15:21:17,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8, entries=150, sequenceid=373, filesize=30.5 K 2024-12-03T15:21:17,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/c81495f50a7243b7abc96237d03269c4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4 2024-12-03T15:21:17,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4, entries=150, sequenceid=373, filesize=12.0 K 2024-12-03T15:21:17,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/cdcdd4b7ba374886ab27bb0671670a78 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78 2024-12-03T15:21:17,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78, entries=150, sequenceid=373, filesize=12.0 K 2024-12-03T15:21:17,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1019ms, sequenceid=373, compaction requested=false 2024-12-03T15:21:17,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:17,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=52 2024-12-03T15:21:17,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:17,883 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:17,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:17,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034acb0b0154f947a8957a3afdb1d97ae7_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239276845/Put/seqid=0 2024-12-03T15:21:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742054_1230 (size=12454) 2024-12-03T15:21:17,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:17,937 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034acb0b0154f947a8957a3afdb1d97ae7_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034acb0b0154f947a8957a3afdb1d97ae7_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:17,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/93d31c280d724b96a7534d290d3f115b, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:17,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/93d31c280d724b96a7534d290d3f115b is 175, key is test_row_0/A:col10/1733239276845/Put/seqid=0 2024-12-03T15:21:17,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742055_1231 (size=31255) 2024-12-03T15:21:17,964 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=402, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/93d31c280d724b96a7534d290d3f115b 2024-12-03T15:21:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:17,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:17,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/72347173557d4abd92caab28ec2e6c81 is 50, key is test_row_0/B:col10/1733239276845/Put/seqid=0 2024-12-03T15:21:17,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742056_1232 (size=12301) 2024-12-03T15:21:17,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239337981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239337982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239337983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:17,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:17,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239337986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239338088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239338090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239338101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239338102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239338291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239338292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239338308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239338308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,390 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/72347173557d4abd92caab28ec2e6c81 2024-12-03T15:21:18,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4083d1c374c3410590fe581861094fa3 is 50, key is test_row_0/C:col10/1733239276845/Put/seqid=0 2024-12-03T15:21:18,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742057_1233 (size=12301) 2024-12-03T15:21:18,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239338596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239338598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239338613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:18,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239338613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:18,808 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4083d1c374c3410590fe581861094fa3 2024-12-03T15:21:18,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/93d31c280d724b96a7534d290d3f115b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b 2024-12-03T15:21:18,817 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b, entries=150, sequenceid=402, filesize=30.5 K 2024-12-03T15:21:18,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/72347173557d4abd92caab28ec2e6c81 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81 2024-12-03T15:21:18,822 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81, entries=150, sequenceid=402, filesize=12.0 K 2024-12-03T15:21:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/4083d1c374c3410590fe581861094fa3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3 2024-12-03T15:21:18,831 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3, entries=150, sequenceid=402, filesize=12.0 K 2024-12-03T15:21:18,832 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 949ms, sequenceid=402, compaction requested=true 2024-12-03T15:21:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:18,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
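The repeated RegionTooBusyException entries above are the region server pushing back on writers once the region's memstore crosses its blocking limit (512.0 K in this run); the expected client behaviour is to back off and retry the mutation. Below is a minimal sketch of that pattern against the standard HBase 2.x client API, assuming the table/row/column names visible in the log (TestAcidGuarantees, test_row_0, family A, qualifier col10) and a placeholder value; note that the stock client already retries this exception internally, so the explicit loop only makes the backoff visible.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/column names taken from the log; the value is a placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // the server rejects this while the memstore is over its blocking limit
          break;            // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; give the flush time to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;   // simple exponential backoff
        }
      }
    }
  }
}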
2024-12-03T15:21:18,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52
2024-12-03T15:21:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=52
2024-12-03T15:21:18,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51
2024-12-03T15:21:18,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.9070 sec
2024-12-03T15:21:18,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.9150 sec
2024-12-03T15:21:19,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-03T15:21:19,031 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed
2024-12-03T15:21:19,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-03T15:21:19,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees
2024-12-03T15:21:19,034 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-03T15:21:19,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53
2024-12-03T15:21:19,035 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T15:21:19,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T15:21:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91
2024-12-03T15:21:19,103 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A
2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B
2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
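The FLUSH operation completing for procId 51 and the new FlushTableProcedure (pid=53) being stored both originate from explicit client flush requests rather than memstore pressure. A minimal sketch of issuing such a request through the public Admin API, assuming the same table name as in the log; the FlushTableProcedure/FlushRegionProcedure bookkeeping seen here is what the master runs in response.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in the log this surfaces as
      // a FlushTableProcedure with FlushRegionProcedure subprocedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}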
2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:19,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:19,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203022878ac06b3475f94d949a765200236_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:19,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742058_1234 (size=12454) 2024-12-03T15:21:19,124 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:19,129 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203022878ac06b3475f94d949a765200236_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203022878ac06b3475f94d949a765200236_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:19,130 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/64daf852e51c4740a546ec1e6ef1ca47, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:19,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/64daf852e51c4740a546ec1e6ef1ca47 is 175, key is test_row_0/A:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:19,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742059_1235 (size=31255) 2024-12-03T15:21:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:19,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239339133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239339133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239339133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,139 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=414, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/64daf852e51c4740a546ec1e6ef1ca47 2024-12-03T15:21:19,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239339134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/0b33200ba93c415cbafcee1fc6b94782 is 50, key is test_row_0/B:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:19,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742060_1236 (size=12301) 2024-12-03T15:21:19,187 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,187 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,196 DEBUG [Thread-637 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:60989 2024-12-03T15:21:19,198 DEBUG [Thread-635 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:60989 2024-12-03T15:21:19,198 DEBUG [Thread-635 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:19,199 DEBUG [Thread-637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:19,200 DEBUG [Thread-641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:60989 2024-12-03T15:21:19,200 DEBUG [Thread-641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:19,202 DEBUG [Thread-639 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:60989 2024-12-03T15:21:19,202 DEBUG [Thread-639 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:19,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239339237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239339237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239339237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239339240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:19,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
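The 512.0 K figure in the recurring "Over memstore limit" warnings is the per-region blocking threshold, i.e. the configured memstore flush size multiplied by the block multiplier, so this run is evidently using a flush size far below the 128 MB production default in order to trigger blocking quickly. A sketch of one way such a limit could be configured, assuming the standard property names; the 256 KB / 2x pairing below is only one hypothetical combination that yields 512 K and is not taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test value: flush each region's memstore at 256 KB (default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 256 * 1024);
    // Block new writes at 2 x the flush size, i.e. 512 KB, matching the limit reported above
    // (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 2);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}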
2024-12-03T15:21:19,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239339438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239339439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239339439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239339442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/0b33200ba93c415cbafcee1fc6b94782 2024-12-03T15:21:19,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/da91c626e21f4cb2bb120705e0f142c2 is 50, key is test_row_0/C:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:19,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742061_1237 (size=12301) 2024-12-03T15:21:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:19,645 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57234 deadline: 1733239339741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57218 deadline: 1733239339742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57194 deadline: 1733239339742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:19,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57140 deadline: 1733239339745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
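The repeated RegionTooBusyException warnings above come from HRegion#checkResources, which rejects new writes once a region's memstore passes its blocking threshold: the configured flush size multiplied by the block multiplier. This test evidently runs with a very small 512.0 K limit. A hedged configuration sketch that would yield that limit; the test's real settings are not shown in this log, and 128 KB x 4 is just one combination producing 512 K:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TightMemstoreLimit {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB (illustrative test-only value).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block writes with RegionTooBusyException once the memstore reaches
            // flush.size * multiplier, i.e. 512 K with these values -- the limit seen above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }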
2024-12-03T15:21:19,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:19,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
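The Mutate calls being rejected are ordinary single-row puts against the three column families of TestAcidGuarantees (the cell keys in this log have the form test_row_N/A:col10). RegionTooBusyException is retryable, so the stock client normally retries these writes itself. A minimal sketch of such a write, with a hypothetical value and default client configuration assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteRow {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // One cell per family, mirroring the A/B/C stores seen in the log.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put); // ClientService.Mutate -> RSRpcServices.mutate -> HRegion.put
            }
        }
    }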
2024-12-03T15:21:19,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/da91c626e21f4cb2bb120705e0f142c2 2024-12-03T15:21:19,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/64daf852e51c4740a546ec1e6ef1ca47 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47 2024-12-03T15:21:19,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47, entries=150, sequenceid=414, filesize=30.5 K 2024-12-03T15:21:19,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/0b33200ba93c415cbafcee1fc6b94782 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782 2024-12-03T15:21:19,977 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782, entries=150, sequenceid=414, filesize=12.0 K 2024-12-03T15:21:19,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/da91c626e21f4cb2bb120705e0f142c2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2 2024-12-03T15:21:19,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2, entries=150, sequenceid=414, filesize=12.0 K 2024-12-03T15:21:19,981 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 879ms, sequenceid=414, compaction requested=true 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9cb1ca4e5b5289fcc2a0bafc5801cb91:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:19,982 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:19,982 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:19,982 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:19,983 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125838 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:19,983 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:19,983 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/A is initiating minor compaction (all files) 2024-12-03T15:21:19,983 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/A in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,983 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/B is initiating minor compaction (all files) 2024-12-03T15:21:19,983 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/B in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
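Each flush adds one new HFile per store, so at this point the A, B and C stores hold four files each and ExploringCompactionPolicy selects all four for a minor compaction. The selection rule is, roughly, that a candidate set is only "in ratio" if every file in it is no larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files in the set. A simplified sketch of that check, not the actual implementation:

    import java.util.List;

    final class CompactionRatioCheck {
        // Simplified form of the ExploringCompactionPolicy validity test: every file must be
        // no larger than ratio * (sum of the sizes of the other files in the candidate set).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }
    }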
2024-12-03T15:21:19,983 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=122.9 K 2024-12-03T15:21:19,983 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:19,983 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47] 2024-12-03T15:21:19,983 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2f4148f6d4e24105b4e2fac264d467ec, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=48.8 K 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 7c81f7ecac5b4dc6a057c99c530c95a9, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f4148f6d4e24105b4e2fac264d467ec, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0664cfd1ed94b6d9f90dad5053eb1b8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733239275670 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c81495f50a7243b7abc96237d03269c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733239275670 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93d31c280d724b96a7534d290d3f115b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733239276845 2024-12-03T15:21:19,984 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 72347173557d4abd92caab28ec2e6c81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733239276845 2024-12-03T15:21:19,985 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b33200ba93c415cbafcee1fc6b94782, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733239277979 2024-12-03T15:21:19,985 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64daf852e51c4740a546ec1e6ef1ca47, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733239277979 2024-12-03T15:21:19,995 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#B#compaction#192 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:19,996 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:19,996 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/a111d4b2bfa0472abacf768d68381d69 is 50, key is test_row_0/B:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:19,998 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412034086f4fef2b04f95b1c3bc2e2c1ac0ce_9cb1ca4e5b5289fcc2a0bafc5801cb91 store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:20,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742062_1238 (size=13255) 2024-12-03T15:21:20,007 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/a111d4b2bfa0472abacf768d68381d69 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/a111d4b2bfa0472abacf768d68381d69 2024-12-03T15:21:20,011 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/B of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into a111d4b2bfa0472abacf768d68381d69(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
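Family A of this table is MOB-enabled, which is why its flushes and compactions run through DefaultMobStoreFlusher and DefaultMobStoreCompactor and why a MOB writer is created here and later aborted once the compactor finds no cells above the MOB threshold. A sketch of how such a schema could be declared; the table and family names follow the log, but the 100-byte threshold is illustrative, since the test's actual threshold is not shown:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class MobFamilyExample {
        static void createTable(Admin admin) throws IOException {
            admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)   // values above the threshold are written to MOB files
                    .setMobThreshold(100)  // bytes; illustrative only
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                .build());
        }
    }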
2024-12-03T15:21:20,011 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:20,011 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/B, priority=12, startTime=1733239279982; duration=0sec 2024-12-03T15:21:20,011 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:20,011 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:B 2024-12-03T15:21:20,011 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:20,012 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:20,013 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 9cb1ca4e5b5289fcc2a0bafc5801cb91/C is initiating minor compaction (all files) 2024-12-03T15:21:20,013 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9cb1ca4e5b5289fcc2a0bafc5801cb91/C in TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:20,013 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/766af67ccc7745e6a776174fa7c2c61f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp, totalSize=48.8 K 2024-12-03T15:21:20,013 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 766af67ccc7745e6a776174fa7c2c61f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1733239275003 2024-12-03T15:21:20,013 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cdcdd4b7ba374886ab27bb0671670a78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=373, earliestPutTs=1733239275670 2024-12-03T15:21:20,014 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4083d1c374c3410590fe581861094fa3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=402, earliestPutTs=1733239276845 2024-12-03T15:21:20,014 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting da91c626e21f4cb2bb120705e0f142c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733239277979 2024-12-03T15:21:20,026 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412034086f4fef2b04f95b1c3bc2e2c1ac0ce_9cb1ca4e5b5289fcc2a0bafc5801cb91, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:20,026 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034086f4fef2b04f95b1c3bc2e2c1ac0ce_9cb1ca4e5b5289fcc2a0bafc5801cb91 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:20,029 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#C#compaction#194 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:20,029 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/badcbc3a6a7f44e6893b1e9f57e51005 is 50, key is test_row_0/C:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:20,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742063_1239 (size=4469) 2024-12-03T15:21:20,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742064_1240 (size=13255) 2024-12-03T15:21:20,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:20,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-03T15:21:20,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:20,104 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:21:20,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:20,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:20,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:20,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:20,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:20,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:20,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036db5749c46664cb5baada0533a26b65d_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_0/A:col10/1733239279132/Put/seqid=0 2024-12-03T15:21:20,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742065_1241 (size=12454) 2024-12-03T15:21:20,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:20,123 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412036db5749c46664cb5baada0533a26b65d_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412036db5749c46664cb5baada0533a26b65d_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:20,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/ddd2e97bba43452db05422f277d101f8, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:20,124 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/ddd2e97bba43452db05422f277d101f8 is 175, key is test_row_0/A:col10/1733239279132/Put/seqid=0 2024-12-03T15:21:20,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742066_1242 (size=31255) 2024-12-03T15:21:20,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:20,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:20,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. as already flushing 2024-12-03T15:21:20,248 DEBUG [Thread-632 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2885d2d9 to 127.0.0.1:60989 2024-12-03T15:21:20,248 DEBUG [Thread-628 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:60989 2024-12-03T15:21:20,248 DEBUG [Thread-626 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x491ea2ee to 127.0.0.1:60989 2024-12-03T15:21:20,248 DEBUG [Thread-628 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:20,248 DEBUG [Thread-632 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:20,248 DEBUG [Thread-626 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:20,249 DEBUG [Thread-624 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:60989 2024-12-03T15:21:20,249 DEBUG [Thread-624 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:20,445 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9cb1ca4e5b5289fcc2a0bafc5801cb91#A#compaction#193 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:20,445 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7c8e3a4a9ee5474b8c36301945e19d64 is 175, key is test_row_0/A:col10/1733239279101/Put/seqid=0 2024-12-03T15:21:20,448 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/badcbc3a6a7f44e6893b1e9f57e51005 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/badcbc3a6a7f44e6893b1e9f57e51005 2024-12-03T15:21:20,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742067_1243 (size=32209) 2024-12-03T15:21:20,452 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/C of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into badcbc3a6a7f44e6893b1e9f57e51005(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:20,453 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:20,453 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/C, priority=12, startTime=1733239279982; duration=0sec 2024-12-03T15:21:20,453 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:20,453 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:C 2024-12-03T15:21:20,529 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=439, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/ddd2e97bba43452db05422f277d101f8 2024-12-03T15:21:20,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ac2748a8c2f14c6897d35391fb3797e1 is 50, key is test_row_0/B:col10/1733239279132/Put/seqid=0 2024-12-03T15:21:20,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742068_1244 (size=12301) 2024-12-03T15:21:20,854 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7c8e3a4a9ee5474b8c36301945e19d64 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c8e3a4a9ee5474b8c36301945e19d64 2024-12-03T15:21:20,858 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9cb1ca4e5b5289fcc2a0bafc5801cb91/A of 9cb1ca4e5b5289fcc2a0bafc5801cb91 into 7c8e3a4a9ee5474b8c36301945e19d64(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:20,858 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:20,858 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91., storeName=9cb1ca4e5b5289fcc2a0bafc5801cb91/A, priority=12, startTime=1733239279982; duration=0sec 2024-12-03T15:21:20,859 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:20,859 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9cb1ca4e5b5289fcc2a0bafc5801cb91:A 2024-12-03T15:21:20,941 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ac2748a8c2f14c6897d35391fb3797e1 2024-12-03T15:21:20,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/a45ce33fbc694e04b50e5cdf29e61908 is 50, key is test_row_0/C:col10/1733239279132/Put/seqid=0 2024-12-03T15:21:20,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742069_1245 (size=12301) 2024-12-03T15:21:21,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:21,352 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/a45ce33fbc694e04b50e5cdf29e61908 2024-12-03T15:21:21,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/ddd2e97bba43452db05422f277d101f8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/ddd2e97bba43452db05422f277d101f8 2024-12-03T15:21:21,361 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/ddd2e97bba43452db05422f277d101f8, entries=150, sequenceid=439, filesize=30.5 K 2024-12-03T15:21:21,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/ac2748a8c2f14c6897d35391fb3797e1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ac2748a8c2f14c6897d35391fb3797e1 2024-12-03T15:21:21,365 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ac2748a8c2f14c6897d35391fb3797e1, entries=150, sequenceid=439, filesize=12.0 K 2024-12-03T15:21:21,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/a45ce33fbc694e04b50e5cdf29e61908 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/a45ce33fbc694e04b50e5cdf29e61908 2024-12-03T15:21:21,370 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/a45ce33fbc694e04b50e5cdf29e61908, entries=150, sequenceid=439, filesize=12.0 K 2024-12-03T15:21:21,371 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=26.84 KB/27480 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1267ms, sequenceid=439, compaction requested=false 2024-12-03T15:21:21,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:21,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:21,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-03T15:21:21,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-03T15:21:21,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-03T15:21:21,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3370 sec 2024-12-03T15:21:21,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.3420 sec 2024-12-03T15:21:21,416 DEBUG [Thread-630 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:60989 2024-12-03T15:21:21,416 DEBUG [Thread-630 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:23,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-03T15:21:23,140 INFO [Thread-634 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5101 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5015 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2089 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6267 rows 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2098 2024-12-03T15:21:23,140 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6294 rows 2024-12-03T15:21:23,140 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T15:21:23,140 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d29de25 to 127.0.0.1:60989 2024-12-03T15:21:23,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:23,145 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-03T15:21:23,145 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-03T15:21:23,146 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:23,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:23,149 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239283149"}]},"ts":"1733239283149"} 2024-12-03T15:21:23,150 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-03T15:21:23,153 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-03T15:21:23,153 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:21:23,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, UNASSIGN}] 2024-12-03T15:21:23,155 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, UNASSIGN 2024-12-03T15:21:23,156 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:23,157 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:21:23,157 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; CloseRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:21:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:23,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:23,309 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(124): Close 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1681): Closing 9cb1ca4e5b5289fcc2a0bafc5801cb91, disabling compactions & flushes 2024-12-03T15:21:23,309 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. after waiting 0 ms 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 2024-12-03T15:21:23,309 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(2837): Flushing 9cb1ca4e5b5289fcc2a0bafc5801cb91 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=A 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=B 2024-12-03T15:21:23,309 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:23,310 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9cb1ca4e5b5289fcc2a0bafc5801cb91, store=C 2024-12-03T15:21:23,310 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:23,317 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203be0fb9bb83114499807bb976fb3bab14_9cb1ca4e5b5289fcc2a0bafc5801cb91 is 50, key is test_row_1/A:col10/1733239281415/Put/seqid=0 2024-12-03T15:21:23,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742070_1246 (size=9914) 2024-12-03T15:21:23,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:23,722 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:23,726 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203be0fb9bb83114499807bb976fb3bab14_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203be0fb9bb83114499807bb976fb3bab14_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:23,727 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7ef9f587109f44a18ba0bba308bfb8cb, store: [table=TestAcidGuarantees family=A region=9cb1ca4e5b5289fcc2a0bafc5801cb91] 2024-12-03T15:21:23,727 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7ef9f587109f44a18ba0bba308bfb8cb is 175, key is test_row_1/A:col10/1733239281415/Put/seqid=0 2024-12-03T15:21:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742071_1247 (size=22561) 2024-12-03T15:21:23,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:24,132 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=449, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7ef9f587109f44a18ba0bba308bfb8cb 2024-12-03T15:21:24,138 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d63f223c458946558f62f42a0485341b is 50, key is test_row_1/B:col10/1733239281415/Put/seqid=0 2024-12-03T15:21:24,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742072_1248 (size=9857) 2024-12-03T15:21:24,142 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d63f223c458946558f62f42a0485341b 2024-12-03T15:21:24,149 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c8b08e7589fd4f2b864c41d8df17c0ff is 50, key is test_row_1/C:col10/1733239281415/Put/seqid=0 2024-12-03T15:21:24,153 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742073_1249 (size=9857) 2024-12-03T15:21:24,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:24,554 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c8b08e7589fd4f2b864c41d8df17c0ff 2024-12-03T15:21:24,558 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/A/7ef9f587109f44a18ba0bba308bfb8cb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7ef9f587109f44a18ba0bba308bfb8cb 2024-12-03T15:21:24,561 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7ef9f587109f44a18ba0bba308bfb8cb, entries=100, sequenceid=449, filesize=22.0 K 2024-12-03T15:21:24,562 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/B/d63f223c458946558f62f42a0485341b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d63f223c458946558f62f42a0485341b 2024-12-03T15:21:24,565 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d63f223c458946558f62f42a0485341b, entries=100, sequenceid=449, filesize=9.6 K 2024-12-03T15:21:24,566 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/.tmp/C/c8b08e7589fd4f2b864c41d8df17c0ff as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c8b08e7589fd4f2b864c41d8df17c0ff 2024-12-03T15:21:24,569 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c8b08e7589fd4f2b864c41d8df17c0ff, entries=100, sequenceid=449, filesize=9.6 K 
2024-12-03T15:21:24,570 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9cb1ca4e5b5289fcc2a0bafc5801cb91 in 1260ms, sequenceid=449, compaction requested=true 2024-12-03T15:21:24,570 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47] to archive 2024-12-03T15:21:24,571 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:21:24,573 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/433a74e1f2e54ffcbb0e51a0fc94dd1d 2024-12-03T15:21:24,574 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/94fe0164503f44689270a692f2aa9944 2024-12-03T15:21:24,576 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/408d7fcad34847b28645a8dfa9d2614b 2024-12-03T15:21:24,577 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/2290a360b5fd447bb86f4904772b5ba0 2024-12-03T15:21:24,578 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9a14ff5672ec4dadbe335023bb9d0d26 2024-12-03T15:21:24,579 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/072a26d4ec5f4e35b6b8344926b49e4d 2024-12-03T15:21:24,580 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7e02c0ad37f24498a5bd7b65b1931a90 2024-12-03T15:21:24,581 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9fd126a698b342a1b36b2c33758ded19 2024-12-03T15:21:24,582 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3f1c81f4eb5949cb9517f71d18f7bdb8 2024-12-03T15:21:24,583 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/9508a975d7c94d2ca91d61b28327bb4c 2024-12-03T15:21:24,584 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/4486fe8128af4a9b80a5475dc6f70099 2024-12-03T15:21:24,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0c294f50c654decb15f2b4f4edc8b1a 2024-12-03T15:21:24,586 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/e6d788cc834c4e7abd85e3220b3ba3c0 2024-12-03T15:21:24,587 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0966d13702914586ab2a062a69601b45 2024-12-03T15:21:24,587 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/78638d74317849b5a18bd63ed628da5b 2024-12-03T15:21:24,588 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/d9845c359a6b44ba9853776cd51bfbd0 2024-12-03T15:21:24,589 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/0097baedd2a04817b4713a4fe68eda54 2024-12-03T15:21:24,590 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/bd28e0ee9cc94d1296d385ce6523b4d2 2024-12-03T15:21:24,591 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/59bff90381ec4ed1b07d8ebb4bcf159c 2024-12-03T15:21:24,592 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/1692b82813a64d068bcac28e9880565b 2024-12-03T15:21:24,593 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/70ee567867e34a19bb19817b15e2e604 2024-12-03T15:21:24,595 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/c7d1d69d6a4d4d2f88a4600eaa23ef4b 2024-12-03T15:21:24,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/3632e46c3d7d4612a77459cedef72e01 2024-12-03T15:21:24,596 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c81f7ecac5b4dc6a057c99c530c95a9 2024-12-03T15:21:24,597 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/686b1a7f65aa44bb927f09bb88b0b8e6 2024-12-03T15:21:24,598 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/b0664cfd1ed94b6d9f90dad5053eb1b8 2024-12-03T15:21:24,599 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/93d31c280d724b96a7534d290d3f115b 2024-12-03T15:21:24,600 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/64daf852e51c4740a546ec1e6ef1ca47 2024-12-03T15:21:24,601 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5c976c89f3424a9d91871f0e30076443, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72f470fda9354669b542b36edf2630e1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/cf9e42e653744e9e8ac57848d1c19f4c, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/abd2ffaa9a16486e90e5e8d02c379a5c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/750d4597e5bd4fbeab232d69bbfc824f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/22dd8c86bfa8469b9768e2ef37174a56, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2f4148f6d4e24105b4e2fac264d467ec, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782] to archive 2024-12-03T15:21:24,602 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:21:24,603 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9aa21d240b0b40c5ab92cbb82ed1430f 2024-12-03T15:21:24,604 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ab5d649fcfe740c19c26758ad26d35ce 2024-12-03T15:21:24,605 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5c976c89f3424a9d91871f0e30076443 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5c976c89f3424a9d91871f0e30076443 2024-12-03T15:21:24,606 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/9b37596b6e214282870fb50e4600b87c 2024-12-03T15:21:24,607 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/fa25bb42f464472cb447a3ad1df9d6b3 2024-12-03T15:21:24,608 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7f856b5814a74669902b8a6d2cee2040 2024-12-03T15:21:24,610 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72f470fda9354669b542b36edf2630e1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72f470fda9354669b542b36edf2630e1 2024-12-03T15:21:24,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d220a48c1f634a53a1694642a5e9b953 2024-12-03T15:21:24,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/09326e17b1144aea9c5becd83ee5bcb3 2024-12-03T15:21:24,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/cf9e42e653744e9e8ac57848d1c19f4c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/cf9e42e653744e9e8ac57848d1c19f4c 2024-12-03T15:21:24,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/82850905492f41fb942afc4e5fd1cf5d 2024-12-03T15:21:24,615 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2d86c9118a9d44488090e0338cd07aad 2024-12-03T15:21:24,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/abd2ffaa9a16486e90e5e8d02c379a5c to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/abd2ffaa9a16486e90e5e8d02c379a5c 2024-12-03T15:21:24,617 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/7bd31faaa23f4b5687943c14f005eb4e 2024-12-03T15:21:24,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/5eaeafca6eb444c292cce9fe421ab833 2024-12-03T15:21:24,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c8a37f1eaa3648aab4f1f708ff626b12 2024-12-03T15:21:24,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/750d4597e5bd4fbeab232d69bbfc824f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/750d4597e5bd4fbeab232d69bbfc824f 2024-12-03T15:21:24,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/40525988ade04520af123145262cce43 2024-12-03T15:21:24,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4bd7f32ce2fe40c094ec766c54772fd8 2024-12-03T15:21:24,624 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/4be6e188602e4ce38bd0442414a8e5f4 2024-12-03T15:21:24,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/22dd8c86bfa8469b9768e2ef37174a56 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/22dd8c86bfa8469b9768e2ef37174a56 2024-12-03T15:21:24,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/869427990f7a437183a58961412571d7 2024-12-03T15:21:24,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/378cfe87311e497f9681be50d48e536d 2024-12-03T15:21:24,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2f4148f6d4e24105b4e2fac264d467ec to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/2f4148f6d4e24105b4e2fac264d467ec 2024-12-03T15:21:24,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/425188504d44442da766b298402f267b 2024-12-03T15:21:24,631 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/c81495f50a7243b7abc96237d03269c4 2024-12-03T15:21:24,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/72347173557d4abd92caab28ec2e6c81 2024-12-03T15:21:24,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/0b33200ba93c415cbafcee1fc6b94782 2024-12-03T15:21:24,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bd021f76dccf4eab9ba1c57742ca695d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/dcfb6a22d56943f3b73bf0c82544810b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/ddff822ea81441e5b386897734fef4d8, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b78eccadf6f144be87cb7060fbf21f1c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/25a34a67abb44411b0e039be4e7add56, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4ebe1bf939aa43b6aa683ecaf0434bc3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/766af67ccc7745e6a776174fa7c2c61f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2] to archive 2024-12-03T15:21:24,635 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:21:24,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f1a832fa3446f8b1c10e7ebb187236 2024-12-03T15:21:24,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/9f2571f2b6ad4a898c8d62b856765194 2024-12-03T15:21:24,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bd021f76dccf4eab9ba1c57742ca695d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bd021f76dccf4eab9ba1c57742ca695d 2024-12-03T15:21:24,640 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/bf80d25df5164aa5b95a09b5526eff60 2024-12-03T15:21:24,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/aae833c64287412cadb6d419fa949d1b 2024-12-03T15:21:24,642 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c2ca57fb18cc4da5bf409a817e9eced7 2024-12-03T15:21:24,643 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/dcfb6a22d56943f3b73bf0c82544810b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/dcfb6a22d56943f3b73bf0c82544810b 2024-12-03T15:21:24,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/3ac97fd7d478471db63ad0c7fec153ba 2024-12-03T15:21:24,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7c48f5a384e74ff1830704299ecb0bb1 2024-12-03T15:21:24,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/ddff822ea81441e5b386897734fef4d8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/ddff822ea81441e5b386897734fef4d8 2024-12-03T15:21:24,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/7d6b52b70d4342198a45d56886474f9f 2024-12-03T15:21:24,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b7686a3051c64ea48ebbcf8870661af4 2024-12-03T15:21:24,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b78eccadf6f144be87cb7060fbf21f1c to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b78eccadf6f144be87cb7060fbf21f1c 2024-12-03T15:21:24,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/403d04b519714379a16595f634ba60a6 2024-12-03T15:21:24,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/90f82e252a3e46e99d0b153029c9dd6c 2024-12-03T15:21:24,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b8912665372743fe917dd48bfa13d990 2024-12-03T15:21:24,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/25a34a67abb44411b0e039be4e7add56 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/25a34a67abb44411b0e039be4e7add56 2024-12-03T15:21:24,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/b1239b681b754788a63e4363ce121728 2024-12-03T15:21:24,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4da897ad0167477b915a35820f3f911d 2024-12-03T15:21:24,656 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/e9b0c553b67a4591bc4f4d45c65a2831 2024-12-03T15:21:24,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4ebe1bf939aa43b6aa683ecaf0434bc3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4ebe1bf939aa43b6aa683ecaf0434bc3 2024-12-03T15:21:24,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/de2756d7490947708f0ee61c370c2654 2024-12-03T15:21:24,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/44fef929c17541029ba0e8f81e866afc 2024-12-03T15:21:24,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/766af67ccc7745e6a776174fa7c2c61f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/766af67ccc7745e6a776174fa7c2c61f 2024-12-03T15:21:24,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/0a26affed68940a58c4e98ce96f49aaf 2024-12-03T15:21:24,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/cdcdd4b7ba374886ab27bb0671670a78 2024-12-03T15:21:24,663 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/4083d1c374c3410590fe581861094fa3 2024-12-03T15:21:24,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/da91c626e21f4cb2bb120705e0f142c2 2024-12-03T15:21:24,668 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits/452.seqid, newMaxSeqId=452, maxSeqId=4 2024-12-03T15:21:24,668 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91. 
2024-12-03T15:21:24,669 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1635): Region close journal for 9cb1ca4e5b5289fcc2a0bafc5801cb91: 2024-12-03T15:21:24,670 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(170): Closed 9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:24,670 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=9cb1ca4e5b5289fcc2a0bafc5801cb91, regionState=CLOSED 2024-12-03T15:21:24,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-03T15:21:24,672 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; CloseRegionProcedure 9cb1ca4e5b5289fcc2a0bafc5801cb91, server=2b5ef621a0dd,46815,1733239226292 in 1.5140 sec 2024-12-03T15:21:24,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-03T15:21:24,673 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9cb1ca4e5b5289fcc2a0bafc5801cb91, UNASSIGN in 1.5180 sec 2024-12-03T15:21:24,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-03T15:21:24,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5200 sec 2024-12-03T15:21:24,675 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239284675"}]},"ts":"1733239284675"} 2024-12-03T15:21:24,676 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-03T15:21:24,678 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:21:24,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5330 sec 2024-12-03T15:21:24,682 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-03T15:21:25,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-03T15:21:25,252 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-03T15:21:25,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:21:25,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,254 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-03T15:21:25,255 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=59, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,256 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,259 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits] 2024-12-03T15:21:25,262 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c8e3a4a9ee5474b8c36301945e19d64 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7c8e3a4a9ee5474b8c36301945e19d64 2024-12-03T15:21:25,263 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7ef9f587109f44a18ba0bba308bfb8cb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/7ef9f587109f44a18ba0bba308bfb8cb 2024-12-03T15:21:25,264 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/ddd2e97bba43452db05422f277d101f8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/A/ddd2e97bba43452db05422f277d101f8 2024-12-03T15:21:25,267 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/a111d4b2bfa0472abacf768d68381d69 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/a111d4b2bfa0472abacf768d68381d69 2024-12-03T15:21:25,268 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ac2748a8c2f14c6897d35391fb3797e1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/ac2748a8c2f14c6897d35391fb3797e1 2024-12-03T15:21:25,269 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d63f223c458946558f62f42a0485341b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/B/d63f223c458946558f62f42a0485341b 2024-12-03T15:21:25,271 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/a45ce33fbc694e04b50e5cdf29e61908 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/a45ce33fbc694e04b50e5cdf29e61908 2024-12-03T15:21:25,272 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/badcbc3a6a7f44e6893b1e9f57e51005 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/badcbc3a6a7f44e6893b1e9f57e51005 2024-12-03T15:21:25,273 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c8b08e7589fd4f2b864c41d8df17c0ff to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/C/c8b08e7589fd4f2b864c41d8df17c0ff 2024-12-03T15:21:25,276 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits/452.seqid 
to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91/recovered.edits/452.seqid 2024-12-03T15:21:25,277 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,277 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:21:25,278 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:21:25,278 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-03T15:21:25,282 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030117e89aeffe4f059d965ea5043247d5_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030117e89aeffe4f059d965ea5043247d5_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,284 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203022878ac06b3475f94d949a765200236_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203022878ac06b3475f94d949a765200236_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,285 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030d93ea6459794627b4b3e90976dd0136_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030d93ea6459794627b4b3e90976dd0136_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,286 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120325b8baeb88f24c8a94560caa0d388e08_9cb1ca4e5b5289fcc2a0bafc5801cb91 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120325b8baeb88f24c8a94560caa0d388e08_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,287 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120330b21585141540efbc98d44664d5b654_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120330b21585141540efbc98d44664d5b654_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,288 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033271e4293d844b6c943abd9fe05f3c90_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033271e4293d844b6c943abd9fe05f3c90_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,289 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034142a46a1e414dfb9a159ba8b9985369_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034142a46a1e414dfb9a159ba8b9985369_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,290 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034acb0b0154f947a8957a3afdb1d97ae7_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034acb0b0154f947a8957a3afdb1d97ae7_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,291 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034d63e471683d4f1bac0e118c7076ffea_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034d63e471683d4f1bac0e118c7076ffea_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,292 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034eeec2cab9cb4912aefc810aedf5f1f9_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034eeec2cab9cb4912aefc810aedf5f1f9_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,294 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035862bbb09e034aa799d25c1cdf887f38_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035862bbb09e034aa799d25c1cdf887f38_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,295 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412036db5749c46664cb5baada0533a26b65d_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412036db5749c46664cb5baada0533a26b65d_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,296 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120374ae8e89d2a04bd2bcfc94dab58ea552_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120374ae8e89d2a04bd2bcfc94dab58ea552_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,297 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120379a28755050444f8a7bc0da517e90cf1_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120379a28755050444f8a7bc0da517e90cf1_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,299 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037d752ed5f5d34f86b0bba64fdd597924_9cb1ca4e5b5289fcc2a0bafc5801cb91 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037d752ed5f5d34f86b0bba64fdd597924_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,300 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038995a43f133840f3a3d886345a9d0e4c_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038995a43f133840f3a3d886345a9d0e4c_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,301 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038c50b528b1eb4c19a21dc47be41db60f_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038c50b528b1eb4c19a21dc47be41db60f_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,302 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038e6ada6154f041e9ae93e583d0fccca6_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038e6ada6154f041e9ae93e583d0fccca6_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,303 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397951e7de8b442d4b8a6e6b08804c2c8_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397951e7de8b442d4b8a6e6b08804c2c8_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,304 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ae48f887a661453e81789b4961622898_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ae48f887a661453e81789b4961622898_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,306 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bd9a5aaac32b4081869b4b8620433913_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bd9a5aaac32b4081869b4b8620433913_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,307 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203be0fb9bb83114499807bb976fb3bab14_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203be0fb9bb83114499807bb976fb3bab14_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,308 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203d61826664f274b12aacab8c0ef3cb402_9cb1ca4e5b5289fcc2a0bafc5801cb91 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203d61826664f274b12aacab8c0ef3cb402_9cb1ca4e5b5289fcc2a0bafc5801cb91 2024-12-03T15:21:25,308 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:21:25,311 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=59, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,313 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:21:25,316 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:21:25,317 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=59, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,317 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-12-03T15:21:25,317 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239285317"}]},"ts":"9223372036854775807"} 2024-12-03T15:21:25,319 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:21:25,319 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9cb1ca4e5b5289fcc2a0bafc5801cb91, NAME => 'TestAcidGuarantees,,1733239255953.9cb1ca4e5b5289fcc2a0bafc5801cb91.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:21:25,319 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-03T15:21:25,319 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239285319"}]},"ts":"9223372036854775807"} 2024-12-03T15:21:25,321 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:21:25,324 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=59, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,325 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 71 msec 2024-12-03T15:21:25,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-03T15:21:25,356 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-03T15:21:25,367 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=242 (was 241) Potentially hanging thread: hconnection-0x240475eb-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1915712582_22 at /127.0.0.1:56482 [Waiting for operation #504] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1502572181_22 at /127.0.0.1:57254 [Waiting for operation #799] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1915712582_22 at /127.0.0.1:56832 [Waiting for operation #425] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1502572181_22 at /127.0.0.1:56844 [Waiting for operation #427] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x240475eb-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=464 (was 463) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=825 (was 798) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2006 (was 1084) - AvailableMemoryMB LEAK? - 2024-12-03T15:21:25,378 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=242, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=825, ProcessCount=11, AvailableMemoryMB=2003 2024-12-03T15:21:25,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-03T15:21:25,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:21:25,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:25,381 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:21:25,381 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:25,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 60 2024-12-03T15:21:25,382 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:21:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:25,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742074_1250 (size=963) 2024-12-03T15:21:25,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:25,790 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:21:25,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742075_1251 (size=53) 2024-12-03T15:21:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:26,053 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 5a9eb34ef535e1571d4c28ffefa7e658, disabling compactions & flushes 2024-12-03T15:21:26,197 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. after waiting 0 ms 2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:26,197 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:26,197 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:26,198 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:21:26,198 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239286198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239286198"}]},"ts":"1733239286198"} 2024-12-03T15:21:26,199 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-03T15:21:26,200 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:21:26,200 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239286200"}]},"ts":"1733239286200"} 2024-12-03T15:21:26,200 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:21:26,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, ASSIGN}] 2024-12-03T15:21:26,205 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, ASSIGN 2024-12-03T15:21:26,206 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:21:26,356 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=5a9eb34ef535e1571d4c28ffefa7e658, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:26,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; OpenRegionProcedure 5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:21:26,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:26,509 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:26,512 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:26,512 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7285): Opening region: {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:21:26,513 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,513 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:21:26,513 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7327): checking encryption for 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,513 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7330): checking classloading for 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,514 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,515 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:26,515 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a9eb34ef535e1571d4c28ffefa7e658 columnFamilyName A 2024-12-03T15:21:26,516 DEBUG [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:26,516 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(327): Store=5a9eb34ef535e1571d4c28ffefa7e658/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:26,516 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,517 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:26,517 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a9eb34ef535e1571d4c28ffefa7e658 columnFamilyName B 2024-12-03T15:21:26,517 DEBUG [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:26,518 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(327): Store=5a9eb34ef535e1571d4c28ffefa7e658/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:26,518 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,518 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:26,519 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5a9eb34ef535e1571d4c28ffefa7e658 columnFamilyName C 2024-12-03T15:21:26,519 DEBUG [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:26,519 INFO [StoreOpener-5a9eb34ef535e1571d4c28ffefa7e658-1 {}] regionserver.HStore(327): Store=5a9eb34ef535e1571d4c28ffefa7e658/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:26,519 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:26,520 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,520 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,522 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:21:26,523 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1085): writing seq id for 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:26,526 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:21:26,526 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1102): Opened 5a9eb34ef535e1571d4c28ffefa7e658; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69820457, jitterRate=0.04040588438510895}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:21:26,527 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1001): Region open journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:26,528 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., pid=62, masterSystemTime=1733239286509 2024-12-03T15:21:26,529 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:26,529 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:26,529 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=5a9eb34ef535e1571d4c28ffefa7e658, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:26,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-03T15:21:26,532 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; OpenRegionProcedure 5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 in 174 msec 2024-12-03T15:21:26,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-03T15:21:26,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, ASSIGN in 328 msec 2024-12-03T15:21:26,534 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:21:26,534 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239286534"}]},"ts":"1733239286534"} 2024-12-03T15:21:26,535 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:21:26,538 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:21:26,540 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1590 sec 2024-12-03T15:21:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-03T15:21:27,486 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 60 completed 2024-12-03T15:21:27,488 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-12-03T15:21:27,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,492 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,493 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,494 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:21:27,495 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39218, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:21:27,497 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-12-03T15:21:27,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-12-03T15:21:27,504 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,504 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-12-03T15:21:27,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,508 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-12-03T15:21:27,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,511 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-12-03T15:21:27,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,515 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-12-03T15:21:27,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-12-03T15:21:27,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,522 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-12-03T15:21:27,524 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,525 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-12-03T15:21:27,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3677bd4f to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3bf0ba59 2024-12-03T15:21:27,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9e2976, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:21:27,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:27,534 DEBUG [hconnection-0x1e56fc6b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-03T15:21:27,535 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:27,535 DEBUG [hconnection-0x64ba13df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,535 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:27,536 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:27,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,536 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:27,536 DEBUG [hconnection-0x1f0b5c24-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,537 DEBUG [hconnection-0x42b6a9aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,537 DEBUG [hconnection-0x45a86740-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,537 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,538 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,538 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52960, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,542 DEBUG [hconnection-0x16c535ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,542 DEBUG [hconnection-0x2c9198e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,543 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,543 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:27,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:21:27,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:27,549 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:27,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:27,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:27,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:27,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:27,561 DEBUG [hconnection-0x29976ce1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,562 DEBUG [hconnection-0x64a09ea8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,562 DEBUG [hconnection-0x56c86fcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:21:27,563 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,563 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,563 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:21:27,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 is 50, key is test_row_0/A:col10/1733239287542/Put/seqid=0 2024-12-03T15:21:27,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742076_1252 (size=12001) 2024-12-03T15:21:27,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239347598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239347600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239347601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239347603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239347607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:27,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-03T15:21:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:27,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239347704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239347704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239347705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239347708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239347710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:27,841 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-03T15:21:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:27,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239347906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239347907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239347907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239347911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:27,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239347913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 2024-12-03T15:21:27,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:27,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-03T15:21:27,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:27,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:27,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:27,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:27,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:28,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/93cb1323e4a7462daa0d49c2f9bd1cde is 50, key is test_row_0/B:col10/1733239287542/Put/seqid=0 2024-12-03T15:21:28,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742077_1253 (size=12001) 2024-12-03T15:21:28,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/93cb1323e4a7462daa0d49c2f9bd1cde 2024-12-03T15:21:28,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/0ef400c3411642e68a10c519535c2c8d is 50, key is test_row_0/C:col10/1733239287542/Put/seqid=0 2024-12-03T15:21:28,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742078_1254 (size=12001) 2024-12-03T15:21:28,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/0ef400c3411642e68a10c519535c2c8d 2024-12-03T15:21:28,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 2024-12-03T15:21:28,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5, entries=150, sequenceid=14, filesize=11.7 K 2024-12-03T15:21:28,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/93cb1323e4a7462daa0d49c2f9bd1cde as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde 2024-12-03T15:21:28,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde, entries=150, sequenceid=14, filesize=11.7 K 2024-12-03T15:21:28,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/0ef400c3411642e68a10c519535c2c8d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d 2024-12-03T15:21:28,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d, entries=150, sequenceid=14, filesize=11.7 K 2024-12-03T15:21:28,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 5a9eb34ef535e1571d4c28ffefa7e658 in 547ms, sequenceid=14, compaction requested=false 2024-12-03T15:21:28,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:28,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:28,150 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-03T15:21:28,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:28,151 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:28,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:28,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63bca4b61cf24b638e783800a4af92e4 is 50, key is test_row_0/A:col10/1733239287599/Put/seqid=0 2024-12-03T15:21:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742079_1255 (size=12001) 2024-12-03T15:21:28,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:28,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:28,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239348216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239348217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239348219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239348220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239348220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239348321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239348322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239348323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239348323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239348523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239348525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239348525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239348526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,565 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63bca4b61cf24b638e783800a4af92e4 2024-12-03T15:21:28,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/75895ba83f774f23ad104b68d1f494d1 is 50, key is test_row_0/B:col10/1733239287599/Put/seqid=0 2024-12-03T15:21:28,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742080_1256 (size=12001) 2024-12-03T15:21:28,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:28,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239348724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239348827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239348830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239348830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239348830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:28,998 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/75895ba83f774f23ad104b68d1f494d1 2024-12-03T15:21:29,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/08c79b96b7594a7e856b62cb7f041b75 is 50, key is test_row_0/C:col10/1733239287599/Put/seqid=0 2024-12-03T15:21:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742081_1257 (size=12001) 2024-12-03T15:21:29,026 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/08c79b96b7594a7e856b62cb7f041b75 2024-12-03T15:21:29,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63bca4b61cf24b638e783800a4af92e4 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4 2024-12-03T15:21:29,041 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4, entries=150, sequenceid=38, filesize=11.7 K 2024-12-03T15:21:29,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/75895ba83f774f23ad104b68d1f494d1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1 2024-12-03T15:21:29,046 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1, entries=150, sequenceid=38, filesize=11.7 K 2024-12-03T15:21:29,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/08c79b96b7594a7e856b62cb7f041b75 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75 2024-12-03T15:21:29,051 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75, entries=150, sequenceid=38, filesize=11.7 K 2024-12-03T15:21:29,051 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5a9eb34ef535e1571d4c28ffefa7e658 in 900ms, sequenceid=38, compaction requested=false 2024-12-03T15:21:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:29,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-03T15:21:29,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-03T15:21:29,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-03T15:21:29,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5170 sec 2024-12-03T15:21:29,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.5220 sec 2024-12-03T15:21:29,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:29,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:29,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:29,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7d2de0c5c36b4bab857cd806be11603f is 50, key is test_row_0/A:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:29,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742082_1258 (size=12001) 2024-12-03T15:21:29,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239349399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239349401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239349402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239349405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,439 DEBUG [master/2b5ef621a0dd:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 31c39c5a8622ff80b89b6cf13dfade9c changed from -1.0 to 0.0, refreshing cache 2024-12-03T15:21:29,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239349503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239349506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239349507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239349510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-03T15:21:29,642 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-03T15:21:29,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:29,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-03T15:21:29,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:29,670 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:29,674 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:29,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:29,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239349709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239349714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239349714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239349714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:29,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239349733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:29,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7d2de0c5c36b4bab857cd806be11603f 2024-12-03T15:21:29,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0928d77034d347f29030f1a9f90d228f is 50, key is test_row_0/B:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:29,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:29,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-03T15:21:29,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:29,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:29,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:29,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:29,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:29,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742083_1259 (size=12001) 2024-12-03T15:21:29,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0928d77034d347f29030f1a9f90d228f 2024-12-03T15:21:29,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01b999892816463d97cbd759305f9689 is 50, key is test_row_0/C:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:29,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742084_1260 (size=12001) 2024-12-03T15:21:29,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01b999892816463d97cbd759305f9689 2024-12-03T15:21:29,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7d2de0c5c36b4bab857cd806be11603f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f 2024-12-03T15:21:29,945 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f, entries=150, sequenceid=52, filesize=11.7 K 2024-12-03T15:21:29,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0928d77034d347f29030f1a9f90d228f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f 2024-12-03T15:21:29,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f, entries=150, sequenceid=52, filesize=11.7 K 2024-12-03T15:21:29,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01b999892816463d97cbd759305f9689 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689 2024-12-03T15:21:29,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:29,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689, entries=150, sequenceid=52, filesize=11.7 K 2024-12-03T15:21:29,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 5a9eb34ef535e1571d4c28ffefa7e658 in 642ms, sequenceid=52, compaction requested=true 2024-12-03T15:21:29,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:29,977 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:29,977 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:29,978 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:29,978 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:29,978 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:29,978 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:29,978 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:29,978 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=35.2 K 2024-12-03T15:21:29,979 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 93cb1323e4a7462daa0d49c2f9bd1cde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239287542 2024-12-03T15:21:29,979 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:29,979 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=35.2 K 2024-12-03T15:21:29,979 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a0a0d3fea0843b59b9a5e1d59af0bf5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239287542 2024-12-03T15:21:29,979 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 75895ba83f774f23ad104b68d1f494d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733239287594 2024-12-03T15:21:29,980 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0928d77034d347f29030f1a9f90d228f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:29,980 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63bca4b61cf24b638e783800a4af92e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733239287594 2024-12-03T15:21:29,980 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d2de0c5c36b4bab857cd806be11603f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:30,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-03T15:21:30,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:30,001 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#210 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:30,001 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:30,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:30,003 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/42be445b2735415f9ccd1da2ba9970a6 is 50, key is test_row_0/B:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:30,016 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#211 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:30,016 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/266d781ba5594494881952a0fc125952 is 50, key is test_row_0/A:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:30,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:30,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/30ee988711784d98b78f1c5e9e85b584 is 50, key is test_row_0/A:col10/1733239289399/Put/seqid=0 2024-12-03T15:21:30,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239350041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239350043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239350044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239350044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742085_1261 (size=12104) 2024-12-03T15:21:30,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742087_1263 (size=12001) 2024-12-03T15:21:30,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742086_1262 (size=12104) 2024-12-03T15:21:30,138 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/266d781ba5594494881952a0fc125952 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/266d781ba5594494881952a0fc125952 2024-12-03T15:21:30,143 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into 266d781ba5594494881952a0fc125952(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:30,143 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:30,143 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=13, startTime=1733239289977; duration=0sec 2024-12-03T15:21:30,143 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:30,144 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:30,144 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:30,146 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:30,146 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:30,146 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:30,146 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=35.2 K 2024-12-03T15:21:30,150 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ef400c3411642e68a10c519535c2c8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733239287542 2024-12-03T15:21:30,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239350148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239350151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,153 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08c79b96b7594a7e856b62cb7f041b75, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733239287594 2024-12-03T15:21:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239350153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,157 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01b999892816463d97cbd759305f9689, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:30,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239350157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,192 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#213 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:30,192 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/69d5c71ecc94492e9f9b5a128560c1c3 is 50, key is test_row_0/C:col10/1733239288218/Put/seqid=0 2024-12-03T15:21:30,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742088_1264 (size=12104) 2024-12-03T15:21:30,231 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T15:21:30,242 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/69d5c71ecc94492e9f9b5a128560c1c3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/69d5c71ecc94492e9f9b5a128560c1c3 2024-12-03T15:21:30,251 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 69d5c71ecc94492e9f9b5a128560c1c3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:30,251 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:30,251 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=13, startTime=1733239289977; duration=0sec 2024-12-03T15:21:30,251 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:30,251 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:30,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:30,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239350353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239350366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239350366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239350367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,481 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/42be445b2735415f9ccd1da2ba9970a6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/42be445b2735415f9ccd1da2ba9970a6 2024-12-03T15:21:30,528 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/30ee988711784d98b78f1c5e9e85b584 2024-12-03T15:21:30,534 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into 42be445b2735415f9ccd1da2ba9970a6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:30,534 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:30,534 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=13, startTime=1733239289977; duration=0sec 2024-12-03T15:21:30,534 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:30,534 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:30,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e650d5dbb1d84586a1ac3a684464a3ea is 50, key is test_row_0/B:col10/1733239289399/Put/seqid=0 2024-12-03T15:21:30,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742089_1265 (size=12001) 2024-12-03T15:21:30,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239350661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239350670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239350674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:30,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239350673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:30,996 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e650d5dbb1d84586a1ac3a684464a3ea 2024-12-03T15:21:31,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/8be01a4bf7794f1eaf9475231de51bc4 is 50, key is test_row_0/C:col10/1733239289399/Put/seqid=0 2024-12-03T15:21:31,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742090_1266 (size=12001) 2024-12-03T15:21:31,081 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/8be01a4bf7794f1eaf9475231de51bc4 2024-12-03T15:21:31,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/30ee988711784d98b78f1c5e9e85b584 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584 2024-12-03T15:21:31,093 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584, entries=150, sequenceid=75, filesize=11.7 K 2024-12-03T15:21:31,095 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e650d5dbb1d84586a1ac3a684464a3ea as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea 2024-12-03T15:21:31,115 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea, entries=150, sequenceid=75, filesize=11.7 K 2024-12-03T15:21:31,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/8be01a4bf7794f1eaf9475231de51bc4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4 2024-12-03T15:21:31,144 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4, entries=150, sequenceid=75, filesize=11.7 K 2024-12-03T15:21:31,152 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1151ms, sequenceid=75, compaction requested=false 2024-12-03T15:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:31,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-03T15:21:31,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-03T15:21:31,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-03T15:21:31,155 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4800 sec 2024-12-03T15:21:31,157 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.4980 sec 2024-12-03T15:21:31,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:31,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:21:31,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:31,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:31,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:31,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/39b42f13f905402eae5ad933f6469d9a is 50, key is test_row_0/A:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742091_1267 (size=14341) 2024-12-03T15:21:31,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/39b42f13f905402eae5ad933f6469d9a 2024-12-03T15:21:31,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239351245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/f7103413680549688afdf7349db63d70 is 50, key is test_row_0/B:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239351247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239351250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239351244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742092_1268 (size=12001) 2024-12-03T15:21:31,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/f7103413680549688afdf7349db63d70 2024-12-03T15:21:31,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/868e1a442e934b47903742b8fe0b4080 is 50, key is test_row_0/C:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742093_1269 (size=12001) 2024-12-03T15:21:31,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/868e1a442e934b47903742b8fe0b4080 2024-12-03T15:21:31,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239351367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239351369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239351370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/39b42f13f905402eae5ad933f6469d9a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a 2024-12-03T15:21:31,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239351378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a, entries=200, sequenceid=92, filesize=14.0 K 2024-12-03T15:21:31,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/f7103413680549688afdf7349db63d70 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70 2024-12-03T15:21:31,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70, entries=150, sequenceid=92, filesize=11.7 K 2024-12-03T15:21:31,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/868e1a442e934b47903742b8fe0b4080 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080 2024-12-03T15:21:31,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080, entries=150, sequenceid=92, filesize=11.7 K 2024-12-03T15:21:31,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5a9eb34ef535e1571d4c28ffefa7e658 in 266ms, sequenceid=92, compaction requested=true 2024-12-03T15:21:31,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:31,434 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:31,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:31,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:31,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:31,435 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-03T15:21:31,437 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:31,438 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:31,442 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:31,442 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:31,442 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:31,442 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/266d781ba5594494881952a0fc125952, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=37.5 K 2024-12-03T15:21:31,447 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 266d781ba5594494881952a0fc125952, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:31,448 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 30ee988711784d98b78f1c5e9e85b584, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733239289399 2024-12-03T15:21:31,448 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 39b42f13f905402eae5ad933f6469d9a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:31,451 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:31,451 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:31,451 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:31,451 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/69d5c71ecc94492e9f9b5a128560c1c3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=35.3 K 2024-12-03T15:21:31,454 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69d5c71ecc94492e9f9b5a128560c1c3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:31,456 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be01a4bf7794f1eaf9475231de51bc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733239289399 2024-12-03T15:21:31,457 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 868e1a442e934b47903742b8fe0b4080, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:31,479 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#219 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:31,479 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4e10b0eea8b94ffaa65686162370f3c3 is 50, key is test_row_0/A:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,495 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#220 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:31,495 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7f3f9e5b46324609afbc92b84371aa53 is 50, key is test_row_0/C:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742094_1270 (size=12207) 2024-12-03T15:21:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742095_1271 (size=12207) 2024-12-03T15:21:31,567 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7f3f9e5b46324609afbc92b84371aa53 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7f3f9e5b46324609afbc92b84371aa53 2024-12-03T15:21:31,582 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 7f3f9e5b46324609afbc92b84371aa53(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:31,582 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,582 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=13, startTime=1733239291435; duration=0sec 2024-12-03T15:21:31,582 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:31,582 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:31,582 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:21:31,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:31,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:31,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:31,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,583 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:31,583 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:31,591 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:21:31,591 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:31,591 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:31,591 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/42be445b2735415f9ccd1da2ba9970a6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=35.3 K 2024-12-03T15:21:31,596 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42be445b2735415f9ccd1da2ba9970a6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239288218 2024-12-03T15:21:31,597 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e650d5dbb1d84586a1ac3a684464a3ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733239289399 2024-12-03T15:21:31,598 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7103413680549688afdf7349db63d70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:31,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a2f4745f013b432c8f41e4471d9e9ea0 is 50, key is test_row_0/A:col10/1733239291244/Put/seqid=0 2024-12-03T15:21:31,629 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#222 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:31,630 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/b4c8987b041c4d83aa4f6b095b1d6a36 is 50, key is test_row_0/B:col10/1733239290030/Put/seqid=0 2024-12-03T15:21:31,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239351617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239351623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239351623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239351630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742096_1272 (size=14341) 2024-12-03T15:21:31,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a2f4745f013b432c8f41e4471d9e9ea0 2024-12-03T15:21:31,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742097_1273 (size=12207) 2024-12-03T15:21:31,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d689bd6c64ec4e05851dd63f3dc38c41 is 50, key is test_row_0/B:col10/1733239291244/Put/seqid=0 2024-12-03T15:21:31,738 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/b4c8987b041c4d83aa4f6b095b1d6a36 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/b4c8987b041c4d83aa4f6b095b1d6a36 2024-12-03T15:21:31,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239351737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239351737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,744 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into b4c8987b041c4d83aa4f6b095b1d6a36(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:31,744 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,744 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=13, startTime=1733239291434; duration=0sec 2024-12-03T15:21:31,744 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:31,744 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:31,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239351742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239351743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,752 DEBUG [Thread-1159 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:31,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239351746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742098_1274 (size=12001) 2024-12-03T15:21:31,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d689bd6c64ec4e05851dd63f3dc38c41 2024-12-03T15:21:31,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-03T15:21:31,776 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-03T15:21:31,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:31,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-03T15:21:31,780 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:31,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:31,780 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:31,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:31,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/59a3323b48f6491193396bcf5e74ad62 is 50, key is test_row_0/C:col10/1733239291244/Put/seqid=0 2024-12-03T15:21:31,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742099_1275 (size=12001) 2024-12-03T15:21:31,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/59a3323b48f6491193396bcf5e74ad62 2024-12-03T15:21:31,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a2f4745f013b432c8f41e4471d9e9ea0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0 2024-12-03T15:21:31,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0, entries=200, sequenceid=116, filesize=14.0 K 2024-12-03T15:21:31,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d689bd6c64ec4e05851dd63f3dc38c41 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41 2024-12-03T15:21:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:31,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41, entries=150, sequenceid=116, filesize=11.7 K 2024-12-03T15:21:31,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/59a3323b48f6491193396bcf5e74ad62 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62 2024-12-03T15:21:31,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62, entries=150, sequenceid=116, filesize=11.7 K 
2024-12-03T15:21:31,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a9eb34ef535e1571d4c28ffefa7e658 in 324ms, sequenceid=116, compaction requested=false 2024-12-03T15:21:31,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,932 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-03T15:21:31,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:31,933 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:21:31,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:31,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:31,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:31,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:31,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:31,944 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4e10b0eea8b94ffaa65686162370f3c3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4e10b0eea8b94ffaa65686162370f3c3 2024-12-03T15:21:31,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a08744d0beb144ce93f8ae2eb1a496bb is 50, key is test_row_0/A:col10/1733239291614/Put/seqid=0 2024-12-03T15:21:31,951 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into 4e10b0eea8b94ffaa65686162370f3c3(size=11.9 K), total size for store is 25.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:31,951 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:31,951 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=13, startTime=1733239291434; duration=0sec 2024-12-03T15:21:31,951 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:31,951 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:31,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742100_1276 (size=12001) 2024-12-03T15:21:31,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239351986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239351987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239351989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:31,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239351991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:32,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239352096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239352096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239352096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239352097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239352302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239352302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239352306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239352309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:32,395 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a08744d0beb144ce93f8ae2eb1a496bb 2024-12-03T15:21:32,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8de001cb8ef64323975ca6e1f7094a32 is 50, key is test_row_0/B:col10/1733239291614/Put/seqid=0 2024-12-03T15:21:32,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742101_1277 (size=12001) 2024-12-03T15:21:32,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239352612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239352612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239352612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:32,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239352612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:32,846 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8de001cb8ef64323975ca6e1f7094a32 2024-12-03T15:21:32,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/db721cfe7e8b4bd483092917a12952fe is 50, key is test_row_0/C:col10/1733239291614/Put/seqid=0 2024-12-03T15:21:32,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:32,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742102_1278 (size=12001) 2024-12-03T15:21:33,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:33,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239353119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:33,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:33,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239353122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:33,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:33,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239353123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:33,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:33,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239353126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:33,314 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/db721cfe7e8b4bd483092917a12952fe 2024-12-03T15:21:33,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/a08744d0beb144ce93f8ae2eb1a496bb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb 2024-12-03T15:21:33,327 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb, entries=150, sequenceid=130, filesize=11.7 K 2024-12-03T15:21:33,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8de001cb8ef64323975ca6e1f7094a32 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32 2024-12-03T15:21:33,336 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32, entries=150, sequenceid=130, filesize=11.7 K 2024-12-03T15:21:33,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/db721cfe7e8b4bd483092917a12952fe as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe 2024-12-03T15:21:33,344 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe, entries=150, sequenceid=130, filesize=11.7 K 2024-12-03T15:21:33,346 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1413ms, sequenceid=130, compaction requested=true 2024-12-03T15:21:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:33,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-03T15:21:33,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-03T15:21:33,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-03T15:21:33,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5680 sec 2024-12-03T15:21:33,368 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.5850 sec 2024-12-03T15:21:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-03T15:21:33,898 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-03T15:21:33,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:33,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-03T15:21:33,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-03T15:21:33,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:33,939 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:33,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:34,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-03T15:21:34,098 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:34,099 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:34,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6ea48d41904747409ea208522961b10f is 50, key is test_row_0/A:col10/1733239291989/Put/seqid=0 2024-12-03T15:21:34,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 
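The entries above show the flush of TestAcidGuarantees being driven from the client side: the master stores a FlushTableProcedure (pid=69), fans it out as a FlushRegionProcedure subprocedure, and the caller repeatedly asks "Checking to see if procedure is done" until it completes. Below is a minimal sketch of issuing that kind of flush through the public Admin API; the table name is taken from the log, while the connection setup and class name are illustrative assumptions rather than part of the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Request a flush of every region of the table. In this log the master
            // runs it as a FlushTableProcedure and the client-side TableFuture polls
            // the master until the procedure is reported complete.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The "client.HBaseAdmin$TableFuture ... Operation: FLUSH ... completed" lines further down are the client-side counterpart of that polling.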
2024-12-03T15:21:34,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:34,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742103_1279 (size=12151) 2024-12-03T15:21:34,170 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6ea48d41904747409ea208522961b10f 2024-12-03T15:21:34,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239354170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239354170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239354175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239354176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5ae8f2e876584f8c839146af48ac1a5f is 50, key is test_row_0/B:col10/1733239291989/Put/seqid=0 2024-12-03T15:21:34,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-03T15:21:34,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742104_1280 (size=12151) 2024-12-03T15:21:34,265 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5ae8f2e876584f8c839146af48ac1a5f 2024-12-03T15:21:34,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239354282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239354283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239354286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239354290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/241c2fdb0bed44e48696ca646ec67da5 is 50, key is test_row_0/C:col10/1733239291989/Put/seqid=0 2024-12-03T15:21:34,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742105_1281 (size=12151) 2024-12-03T15:21:34,335 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/241c2fdb0bed44e48696ca646ec67da5 2024-12-03T15:21:34,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6ea48d41904747409ea208522961b10f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f 2024-12-03T15:21:34,347 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f, entries=150, sequenceid=154, filesize=11.9 K 2024-12-03T15:21:34,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5ae8f2e876584f8c839146af48ac1a5f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f 2024-12-03T15:21:34,354 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 
{event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f, entries=150, sequenceid=154, filesize=11.9 K 2024-12-03T15:21:34,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/241c2fdb0bed44e48696ca646ec67da5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5 2024-12-03T15:21:34,361 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5, entries=150, sequenceid=154, filesize=11.9 K 2024-12-03T15:21:34,364 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 5a9eb34ef535e1571d4c28ffefa7e658 in 264ms, sequenceid=154, compaction requested=true 2024-12-03T15:21:34,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:34,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
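Interleaved with these flushes, every RPC handler keeps rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") until the in-flight flush drains the memstore. The sketch below shows one way a test client could back off and retry such writes explicitly. It is only an illustration: the stock HBase client normally retries RegionTooBusyException on its own (governed by hbase.client.retries.number and hbase.client.pause), so this assumes those retries are configured low enough for the exception to surface to application code. Table, row, family, and qualifier names are copied from the log; the value and backoff numbers are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                      // assumed starting backoff
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    break;                             // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is above its blocking memstore limit (512 K in this
                    // test); wait for the pending flush to drain it and try again.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}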
2024-12-03T15:21:34,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-03T15:21:34,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-03T15:21:34,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-03T15:21:34,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 431 msec 2024-12-03T15:21:34,394 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 460 msec 2024-12-03T15:21:34,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:34,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:34,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4031f720fb4840e4b75718315f728683 is 50, key is test_row_0/A:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-03T15:21:34,534 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-03T15:21:34,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:34,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239354542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239354544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-03T15:21:34,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239354547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239354554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,556 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:34,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742106_1282 (size=12151) 2024-12-03T15:21:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:34,557 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:34,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:34,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4031f720fb4840e4b75718315f728683 2024-12-03T15:21:34,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8ab82de80e024fd08e8faea1a5f486ad is 50, key is test_row_0/B:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,613 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742107_1283 (size=12151) 2024-12-03T15:21:34,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8ab82de80e024fd08e8faea1a5f486ad 2024-12-03T15:21:34,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7124220096a04f69b775f50f7ecc3b50 is 50, key is test_row_0/C:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239354652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:34,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239354662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239354666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239354670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742108_1284 (size=12151) 2024-12-03T15:21:34,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7124220096a04f69b775f50f7ecc3b50 2024-12-03T15:21:34,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:34,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:34,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:34,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:34,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:34,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:34,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:34,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4031f720fb4840e4b75718315f728683 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683 2024-12-03T15:21:34,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683, entries=150, sequenceid=169, filesize=11.9 K 2024-12-03T15:21:34,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8ab82de80e024fd08e8faea1a5f486ad as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad 2024-12-03T15:21:34,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad, entries=150, sequenceid=169, filesize=11.9 K 2024-12-03T15:21:34,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7124220096a04f69b775f50f7ecc3b50 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50 2024-12-03T15:21:34,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50, entries=150, sequenceid=169, filesize=11.9 K 2024-12-03T15:21:34,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 5a9eb34ef535e1571d4c28ffefa7e658 in 253ms, sequenceid=169, compaction requested=true 2024-12-03T15:21:34,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:34,747 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:34,747 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:34,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:34,749 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62851 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-03T15:21:34,749 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:34,749 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:34,750 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4e10b0eea8b94ffaa65686162370f3c3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=61.4 K 2024-12-03T15:21:34,750 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60511 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-03T15:21:34,750 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e10b0eea8b94ffaa65686162370f3c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:34,750 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:34,750 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:34,750 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/b4c8987b041c4d83aa4f6b095b1d6a36, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=59.1 K 2024-12-03T15:21:34,750 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2f4745f013b432c8f41e4471d9e9ea0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733239291222 2024-12-03T15:21:34,751 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b4c8987b041c4d83aa4f6b095b1d6a36, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:34,751 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting a08744d0beb144ce93f8ae2eb1a496bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733239291612 2024-12-03T15:21:34,751 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d689bd6c64ec4e05851dd63f3dc38c41, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733239291222 2024-12-03T15:21:34,752 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ea48d41904747409ea208522961b10f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733239291965 2024-12-03T15:21:34,752 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8de001cb8ef64323975ca6e1f7094a32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733239291612 2024-12-03T15:21:34,752 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4031f720fb4840e4b75718315f728683, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:34,752 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ae8f2e876584f8c839146af48ac1a5f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733239291965 2024-12-03T15:21:34,753 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ab82de80e024fd08e8faea1a5f486ad, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:34,775 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:34,776 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/6989f4f1a17d4ebd8c787db0b1d098cb is 50, key is test_row_0/B:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,796 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#235 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:34,797 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/d38729c8dde5451e9d2efe9b024c0bc2 is 50, key is test_row_0/A:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742109_1285 (size=12527) 2024-12-03T15:21:34,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:34,863 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:34,863 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:34,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742110_1286 (size=12527) 2024-12-03T15:21:34,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:34,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:34,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:34,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:34,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:34,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:34,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:34,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/ed5b1ff3c1a248feabb9305960c7bbbd is 50, key is test_row_0/A:col10/1733239294536/Put/seqid=0 2024-12-03T15:21:34,900 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/d38729c8dde5451e9d2efe9b024c0bc2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d38729c8dde5451e9d2efe9b024c0bc2 2024-12-03T15:21:34,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239354909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239354909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,915 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into d38729c8dde5451e9d2efe9b024c0bc2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:34,915 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:34,915 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=11, startTime=1733239294747; duration=0sec 2024-12-03T15:21:34,916 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:34,916 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:34,916 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-03T15:21:34,919 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60511 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-03T15:21:34,919 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:34,919 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:34,919 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7f3f9e5b46324609afbc92b84371aa53, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=59.1 K 2024-12-03T15:21:34,919 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f3f9e5b46324609afbc92b84371aa53, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733239290030 2024-12-03T15:21:34,920 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59a3323b48f6491193396bcf5e74ad62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733239291222 2024-12-03T15:21:34,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239354909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,921 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting db721cfe7e8b4bd483092917a12952fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733239291612 2024-12-03T15:21:34,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:34,922 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 241c2fdb0bed44e48696ca646ec67da5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733239291965 2024-12-03T15:21:34,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239354918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:34,923 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7124220096a04f69b775f50f7ecc3b50, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:34,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742111_1287 (size=14541) 2024-12-03T15:21:34,936 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:34,937 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/61cf08edb5444cb7bdad2eaeb6e1b864 is 50, key is test_row_0/C:col10/1733239294173/Put/seqid=0 2024-12-03T15:21:34,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/ed5b1ff3c1a248feabb9305960c7bbbd 2024-12-03T15:21:34,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742112_1288 (size=12527) 2024-12-03T15:21:34,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0948adb7e1ed4c3383d002cddd4d1ace is 50, key is test_row_0/B:col10/1733239294536/Put/seqid=0 2024-12-03T15:21:35,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239355016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,022 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:35,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:35,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:35,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239355018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239355022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239355024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742113_1289 (size=12151) 2024-12-03T15:21:35,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0948adb7e1ed4c3383d002cddd4d1ace 2024-12-03T15:21:35,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2d01526d6808453fb749b4e010fbef05 is 50, key is test_row_0/C:col10/1733239294536/Put/seqid=0 2024-12-03T15:21:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742114_1290 (size=12151) 2024-12-03T15:21:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:35,190 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239355222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239355228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239355237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239355238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,266 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/6989f4f1a17d4ebd8c787db0b1d098cb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/6989f4f1a17d4ebd8c787db0b1d098cb 2024-12-03T15:21:35,283 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into 6989f4f1a17d4ebd8c787db0b1d098cb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:35,284 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:35,284 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=11, startTime=1733239294747; duration=0sec 2024-12-03T15:21:35,284 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:35,284 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:35,346 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,388 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/61cf08edb5444cb7bdad2eaeb6e1b864 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/61cf08edb5444cb7bdad2eaeb6e1b864 2024-12-03T15:21:35,403 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 61cf08edb5444cb7bdad2eaeb6e1b864(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:35,403 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:35,403 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=11, startTime=1733239294747; duration=0sec 2024-12-03T15:21:35,403 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:35,403 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:35,521 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:35,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:35,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:35,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:35,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:35,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239355529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2d01526d6808453fb749b4e010fbef05 2024-12-03T15:21:35,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239355544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239355546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239355551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/ed5b1ff3c1a248feabb9305960c7bbbd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd 2024-12-03T15:21:35,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd, entries=200, sequenceid=191, filesize=14.2 K 2024-12-03T15:21:35,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0948adb7e1ed4c3383d002cddd4d1ace as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace 2024-12-03T15:21:35,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace, entries=150, sequenceid=191, filesize=11.9 K 2024-12-03T15:21:35,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2d01526d6808453fb749b4e010fbef05 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05 2024-12-03T15:21:35,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05, entries=150, sequenceid=191, filesize=11.9 K 2024-12-03T15:21:35,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 
KB/75570 for 5a9eb34ef535e1571d4c28ffefa7e658 in 791ms, sequenceid=191, compaction requested=false 2024-12-03T15:21:35,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:35,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-03T15:21:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:35,681 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:21:35,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:35,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:35,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/2ba216904e2f4486887d836aed0146ab is 50, key is test_row_0/A:col10/1733239294893/Put/seqid=0 2024-12-03T15:21:35,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742115_1291 (size=12151) 2024-12-03T15:21:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:35,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:35,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239355839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:35,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:35,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239355944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239356031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239356050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,053 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:21:36,053 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-03T15:21:36,057 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239356056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239356057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,139 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/2ba216904e2f4486887d836aed0146ab 2024-12-03T15:21:36,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d7452f1a29bc41299218a616465af068 is 50, key is test_row_0/B:col10/1733239294893/Put/seqid=0 2024-12-03T15:21:36,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239356150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742116_1292 (size=12151) 2024-12-03T15:21:36,182 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d7452f1a29bc41299218a616465af068 2024-12-03T15:21:36,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01e4df141f574d43b8f0823563094f4f is 50, key is test_row_0/C:col10/1733239294893/Put/seqid=0 2024-12-03T15:21:36,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742117_1293 (size=12151) 2024-12-03T15:21:36,258 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01e4df141f574d43b8f0823563094f4f 2024-12-03T15:21:36,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/2ba216904e2f4486887d836aed0146ab as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab 2024-12-03T15:21:36,269 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab, entries=150, sequenceid=208, filesize=11.9 K 2024-12-03T15:21:36,270 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d7452f1a29bc41299218a616465af068 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068 2024-12-03T15:21:36,287 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068, entries=150, sequenceid=208, filesize=11.9 K 2024-12-03T15:21:36,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/01e4df141f574d43b8f0823563094f4f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f 2024-12-03T15:21:36,295 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f, entries=150, sequenceid=208, filesize=11.9 K 2024-12-03T15:21:36,296 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5a9eb34ef535e1571d4c28ffefa7e658 in 615ms, sequenceid=208, compaction requested=true 2024-12-03T15:21:36,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:36,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:36,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-03T15:21:36,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-03T15:21:36,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-03T15:21:36,310 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7400 sec 2024-12-03T15:21:36,314 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.7650 sec 2024-12-03T15:21:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:36,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:36,459 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:36,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/bef950c1f77f4695b2bfd783e3e8d0d3 is 50, key is test_row_0/A:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742118_1294 (size=14541) 2024-12-03T15:21:36,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/bef950c1f77f4695b2bfd783e3e8d0d3 2024-12-03T15:21:36,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239356509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5822671b20e64df490240b4802ed2619 is 50, key is test_row_0/B:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:36,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742119_1295 (size=12151) 2024-12-03T15:21:36,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5822671b20e64df490240b4802ed2619 2024-12-03T15:21:36,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/952a5b45608b444a928792044f9791ca is 50, key is test_row_0/C:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:36,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742120_1296 (size=12151) 2024-12-03T15:21:36,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239356622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-03T15:21:36,668 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-03T15:21:36,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:36,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-03T15:21:36,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:36,679 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:36,680 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:36,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:36,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:36,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239356825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,832 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,833 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:36,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:36,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:36,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:36,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:36,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:36,999 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:36,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:36,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:36,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/952a5b45608b444a928792044f9791ca 2024-12-03T15:21:37,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/bef950c1f77f4695b2bfd783e3e8d0d3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3 2024-12-03T15:21:37,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3, entries=200, sequenceid=231, filesize=14.2 K 2024-12-03T15:21:37,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5822671b20e64df490240b4802ed2619 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619 2024-12-03T15:21:37,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619, entries=150, 
sequenceid=231, filesize=11.9 K 2024-12-03T15:21:37,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/952a5b45608b444a928792044f9791ca as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca 2024-12-03T15:21:37,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca, entries=150, sequenceid=231, filesize=11.9 K 2024-12-03T15:21:37,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a9eb34ef535e1571d4c28ffefa7e658 in 584ms, sequenceid=231, compaction requested=true 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:37,043 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:37,043 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:37,045 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:37,045 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:37,045 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in 
TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,045 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/61cf08edb5444cb7bdad2eaeb6e1b864, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=47.8 K 2024-12-03T15:21:37,045 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53760 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:37,046 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:37,046 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:37,046 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d38729c8dde5451e9d2efe9b024c0bc2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=52.5 K 2024-12-03T15:21:37,046 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61cf08edb5444cb7bdad2eaeb6e1b864, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:37,046 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d38729c8dde5451e9d2efe9b024c0bc2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d01526d6808453fb749b4e010fbef05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733239294536 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ed5b1ff3c1a248feabb9305960c7bbbd, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733239294536 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01e4df141f574d43b8f0823563094f4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733239294887 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ba216904e2f4486887d836aed0146ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733239294887 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 952a5b45608b444a928792044f9791ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:37,047 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting bef950c1f77f4695b2bfd783e3e8d0d3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:37,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:37,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:21:37,050 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:37,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:37,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:37,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f069f7fcb8446cd9849b76390819133 is 50, key is test_row_0/A:col10/1733239297048/Put/seqid=0 2024-12-03T15:21:37,069 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:37,070 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/fbd359b8c328419ea78513a5d4c6b545 is 50, key is test_row_0/A:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:37,076 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#248 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:37,076 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/1cf441354a2447a39bbd3bc424d207c4 is 50, key is test_row_0/C:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:37,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239357098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239357099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239357105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239357105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742121_1297 (size=14541) 2024-12-03T15:21:37,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f069f7fcb8446cd9849b76390819133 2024-12-03T15:21:37,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742123_1299 (size=12663) 2024-12-03T15:21:37,154 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:37,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:37,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742122_1298 (size=12663) 2024-12-03T15:21:37,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239357154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,165 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/1cf441354a2447a39bbd3bc424d207c4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/1cf441354a2447a39bbd3bc424d207c4 2024-12-03T15:21:37,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c00c246a3d0e445f87603e09958e7b52 is 50, key is test_row_0/B:col10/1733239297048/Put/seqid=0 2024-12-03T15:21:37,175 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 1cf441354a2447a39bbd3bc424d207c4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:37,175 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:37,175 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=12, startTime=1733239297043; duration=0sec 2024-12-03T15:21:37,176 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:37,176 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:37,176 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:37,177 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48980 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:37,177 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:37,177 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,177 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/6989f4f1a17d4ebd8c787db0b1d098cb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=47.8 K 2024-12-03T15:21:37,180 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6989f4f1a17d4ebd8c787db0b1d098cb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1733239294173 2024-12-03T15:21:37,180 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0948adb7e1ed4c3383d002cddd4d1ace, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1733239294536 2024-12-03T15:21:37,181 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7452f1a29bc41299218a616465af068, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1733239294887 2024-12-03T15:21:37,181 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5822671b20e64df490240b4802ed2619, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:37,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239357206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239357207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742124_1300 (size=12151) 2024-12-03T15:21:37,221 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:37,222 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d8226933f52f4d01893358f4b9537596 is 50, key is test_row_0/B:col10/1733239296456/Put/seqid=0 2024-12-03T15:21:37,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239357218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239357219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742125_1301 (size=12663) 2024-12-03T15:21:37,276 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d8226933f52f4d01893358f4b9537596 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8226933f52f4d01893358f4b9537596 2024-12-03T15:21:37,282 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into d8226933f52f4d01893358f4b9537596(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:37,282 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:37,282 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=12, startTime=1733239297043; duration=0sec 2024-12-03T15:21:37,282 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:37,282 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:37,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:37,309 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:37,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:37,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239357420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239357422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239357429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239357430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,462 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:37,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:37,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/fbd359b8c328419ea78513a5d4c6b545 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/fbd359b8c328419ea78513a5d4c6b545 2024-12-03T15:21:37,563 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into fbd359b8c328419ea78513a5d4c6b545(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:37,563 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:37,563 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=12, startTime=1733239297043; duration=0sec 2024-12-03T15:21:37,563 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:37,563 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:37,617 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:37,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:37,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:37,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c00c246a3d0e445f87603e09958e7b52 2024-12-03T15:21:37,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239357679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/c5474e1d1f914a3097d3d03edd0e810d is 50, key is test_row_0/C:col10/1733239297048/Put/seqid=0 2024-12-03T15:21:37,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742126_1302 (size=12151) 2024-12-03T15:21:37,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/c5474e1d1f914a3097d3d03edd0e810d 2024-12-03T15:21:37,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f069f7fcb8446cd9849b76390819133 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133 2024-12-03T15:21:37,731 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239357730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239357736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133, entries=200, sequenceid=245, filesize=14.2 K 2024-12-03T15:21:37,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c00c246a3d0e445f87603e09958e7b52 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52 2024-12-03T15:21:37,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239357739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:37,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239357742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52, entries=150, sequenceid=245, filesize=11.9 K 2024-12-03T15:21:37,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/c5474e1d1f914a3097d3d03edd0e810d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d 2024-12-03T15:21:37,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d, entries=150, sequenceid=245, filesize=11.9 K 2024-12-03T15:21:37,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 5a9eb34ef535e1571d4c28ffefa7e658 in 709ms, sequenceid=245, compaction requested=false 2024-12-03T15:21:37,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:37,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:37,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-03T15:21:37,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:37,775 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:37,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:37,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6dfbaa01aceb4ab2a37ae0094686b9f4 is 50, key is test_row_0/A:col10/1733239297103/Put/seqid=0 2024-12-03T15:21:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:37,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742127_1303 (size=12301) 2024-12-03T15:21:37,818 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6dfbaa01aceb4ab2a37ae0094686b9f4 2024-12-03T15:21:37,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 is 50, key is test_row_0/B:col10/1733239297103/Put/seqid=0 2024-12-03T15:21:37,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742128_1304 (size=12301) 2024-12-03T15:21:38,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:38,250 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:38,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239358274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239358275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239358276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239358277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,294 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 2024-12-03T15:21:38,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/030cf2065b7f46959f86434d8bc7714b is 50, key is test_row_0/C:col10/1733239297103/Put/seqid=0 2024-12-03T15:21:38,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239358382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239358382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239358386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239358382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742129_1305 (size=12301) 2024-12-03T15:21:38,393 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/030cf2065b7f46959f86434d8bc7714b 2024-12-03T15:21:38,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/6dfbaa01aceb4ab2a37ae0094686b9f4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4 2024-12-03T15:21:38,425 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4, entries=150, sequenceid=271, filesize=12.0 K 2024-12-03T15:21:38,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 2024-12-03T15:21:38,448 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00, entries=150, sequenceid=271, filesize=12.0 K 2024-12-03T15:21:38,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/030cf2065b7f46959f86434d8bc7714b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b 2024-12-03T15:21:38,464 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b, entries=150, sequenceid=271, filesize=12.0 K 2024-12-03T15:21:38,465 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a9eb34ef535e1571d4c28ffefa7e658 in 691ms, sequenceid=271, compaction requested=true 2024-12-03T15:21:38,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:38,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:38,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-03T15:21:38,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-03T15:21:38,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-03T15:21:38,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7920 sec 2024-12-03T15:21:38,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.8150 sec 2024-12-03T15:21:38,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:21:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:38,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:38,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:38,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:38,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-03T15:21:38,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:38,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/40658268c45149a09bed674a2d2c2cce is 50, key is test_row_0/A:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:38,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742130_1306 (size=12301) 2024-12-03T15:21:38,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239358612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239358614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239358615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239358616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239358691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239358718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239358718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239358720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239358720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-03T15:21:38,811 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-03T15:21:38,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:38,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-03T15:21:38,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:38,818 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:38,818 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:38,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:38,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:38,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239358921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239358921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239358923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:38,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239358924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,971 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:38,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:38,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:38,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:38,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:38,972 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:38,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,019 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/40658268c45149a09bed674a2d2c2cce 2024-12-03T15:21:39,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e28c88aa850e425bad43ee2b0c591dea is 50, key is test_row_0/B:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:39,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742131_1307 (size=12301) 2024-12-03T15:21:39,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e28c88aa850e425bad43ee2b0c591dea 2024-12-03T15:21:39,107 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/eeaf8e000806471794a7d78f74036ef3 is 50, key is test_row_0/C:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:39,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:39,124 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:39,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742132_1308 (size=12301) 2024-12-03T15:21:39,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/eeaf8e000806471794a7d78f74036ef3 2024-12-03T15:21:39,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/40658268c45149a09bed674a2d2c2cce as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce 2024-12-03T15:21:39,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce, entries=150, sequenceid=285, filesize=12.0 K 2024-12-03T15:21:39,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/e28c88aa850e425bad43ee2b0c591dea as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea 
2024-12-03T15:21:39,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea, entries=150, sequenceid=285, filesize=12.0 K 2024-12-03T15:21:39,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/eeaf8e000806471794a7d78f74036ef3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3 2024-12-03T15:21:39,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3, entries=150, sequenceid=285, filesize=12.0 K 2024-12-03T15:21:39,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 5a9eb34ef535e1571d4c28ffefa7e658 in 619ms, sequenceid=285, compaction requested=true 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:39,207 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:39,207 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:39,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:39,210 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:39,210 DEBUG 
[RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:39,210 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,210 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8226933f52f4d01893358f4b9537596, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=48.3 K 2024-12-03T15:21:39,211 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d8226933f52f4d01893358f4b9537596, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:39,211 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51806 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:39,211 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:39,211 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:39,211 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/fbd359b8c328419ea78513a5d4c6b545, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=50.6 K 2024-12-03T15:21:39,211 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c00c246a3d0e445f87603e09958e7b52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733239296493 2024-12-03T15:21:39,212 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbd359b8c328419ea78513a5d4c6b545, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:39,212 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a2c3e377e4a4a7bb83c9f2ab8568c00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733239297092 2024-12-03T15:21:39,212 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f069f7fcb8446cd9849b76390819133, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733239296493 2024-12-03T15:21:39,212 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e28c88aa850e425bad43ee2b0c591dea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:39,212 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dfbaa01aceb4ab2a37ae0094686b9f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733239297092 2024-12-03T15:21:39,213 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40658268c45149a09bed674a2d2c2cce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:39,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:39,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:39,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:39,244 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:39,245 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/cd5b73b9b02f47229316bf16877ad6c4 is 50, key is test_row_0/B:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:39,252 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#259 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:39,253 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1e15f607d8894ca7bc806451546abe64 is 50, key is test_row_0/A:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:39,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f0d64c5bd9840e1a1f35e23aeb51823 is 50, key is test_row_0/A:col10/1733239299225/Put/seqid=0 2024-12-03T15:21:39,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742134_1310 (size=12949) 2024-12-03T15:21:39,273 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1e15f607d8894ca7bc806451546abe64 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1e15f607d8894ca7bc806451546abe64 2024-12-03T15:21:39,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:39,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239359254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,287 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into 1e15f607d8894ca7bc806451546abe64(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:39,287 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:39,287 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=12, startTime=1733239299207; duration=0sec 2024-12-03T15:21:39,287 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:39,287 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:39,287 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:39,289 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49416 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:39,289 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:39,289 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:39,289 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/1cf441354a2447a39bbd3bc424d207c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=48.3 K 2024-12-03T15:21:39,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239359287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239359288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239359288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,299 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cf441354a2447a39bbd3bc424d207c4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733239295810 2024-12-03T15:21:39,300 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5474e1d1f914a3097d3d03edd0e810d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1733239296493 2024-12-03T15:21:39,300 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 030cf2065b7f46959f86434d8bc7714b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733239297092 2024-12-03T15:21:39,301 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting eeaf8e000806471794a7d78f74036ef3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:39,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742133_1309 (size=12949) 2024-12-03T15:21:39,309 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/cd5b73b9b02f47229316bf16877ad6c4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/cd5b73b9b02f47229316bf16877ad6c4 2024-12-03T15:21:39,317 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into cd5b73b9b02f47229316bf16877ad6c4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:39,317 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:39,317 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=12, startTime=1733239299207; duration=0sec 2024-12-03T15:21:39,317 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:39,317 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:39,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742135_1311 (size=14741) 2024-12-03T15:21:39,322 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:39,323 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/4f2af9d5f10d42bb8c28bf6646bddc2e is 50, key is test_row_0/C:col10/1733239298263/Put/seqid=0 2024-12-03T15:21:39,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742136_1312 (size=12949) 2024-12-03T15:21:39,340 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/4f2af9d5f10d42bb8c28bf6646bddc2e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/4f2af9d5f10d42bb8c28bf6646bddc2e 2024-12-03T15:21:39,347 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 4f2af9d5f10d42bb8c28bf6646bddc2e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:21:39,347 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:39,347 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=12, startTime=1733239299207; duration=0sec 2024-12-03T15:21:39,347 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:39,347 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:39,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239359388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239359393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239359393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239359393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:39,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:39,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239359591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
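[editor's note] The repeated "Over memstore limit=512.0 K" warnings above all come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking limit. A minimal sketch of that back-pressure check, assuming a single in-memory byte counter; the class, field and method names are illustrative placeholders rather than the actual HRegion internals, and the real code throws RegionTooBusyException instead of a plain IOException.

    import java.io.IOException;

    final class MemstoreGuardSketch {
        // In this run the blocking limit works out to 512 KB.
        static final long BLOCKING_LIMIT_BYTES = 512L * 1024;

        private long memstoreSizeBytes; // bytes currently buffered for the region

        void checkResources(String regionName, String serverName) throws IOException {
            if (memstoreSizeBytes > BLOCKING_LIMIT_BYTES) {
                requestFlush(); // ask for a flush, then push back on the writer
                throw new IOException("Over memstore limit=" + (BLOCKING_LIMIT_BYTES / 1024)
                    + ".0 K, regionName=" + regionName + ", server=" + serverName);
            }
        }

        private void requestFlush() {
            // In the real region server this queues the region for the MemStoreFlusher thread.
        }
    }

Once a flush drains the memstore below the limit, checkResources stops throwing and the blocked writers can proceed, which is the pattern the rest of this excerpt shows.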
2024-12-03T15:21:39,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239359596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239359596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239359596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f0d64c5bd9840e1a1f35e23aeb51823 2024-12-03T15:21:39,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c71189ac12394c2a96ec10bea1c633b2 is 50, key is test_row_0/B:col10/1733239299225/Put/seqid=0 2024-12-03T15:21:39,748 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:39,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,749 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742137_1313 (size=12301) 2024-12-03T15:21:39,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239359898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,901 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:39,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:39,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:39,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239359901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
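[editor's note] The CallRunner entries above show the same RegionTooBusyException being handed back to the writer clients at 172.17.0.2 with fresh deadlines. A minimal sketch, assuming the standard client Table/Put API, of the retry-with-pause behavior a writer needs so it makes progress once the flush frees the memstore; the retry policy (attempt count, fixed pause) is illustrative, not this test's actual configuration.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    final class PutRetrySketch {
        /** Retries a put that may be rejected while the region is blocked on its memstore (maxAttempts >= 1). */
        static void putWithRetry(Table table, Put put, int maxAttempts, long pauseMs)
                throws IOException, InterruptedException {
            IOException last = null;
            for (int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    table.put(put); // may surface RegionTooBusyException while the region is blocked
                    return;
                } catch (IOException e) {
                    last = e;
                    Thread.sleep(pauseMs); // back off before retrying the same mutation
                }
            }
            throw last;
        }
    }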
2024-12-03T15:21:39,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:39,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:39,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239359903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239359903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:40,055 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:40,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:40,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
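[editor's note] The pid=76 cycle keeps repeating above: the master dispatches FlushRegionCallable, the region server refuses because the region is "already flushing", the IOException is reported back, and the master logs "Remote procedure failed, pid=76" and re-dispatches. A rough sketch of that dispatch-until-accepted loop, assuming a hypothetical flushRegion call; it is not the HBase procedure-v2 API.

    import java.io.IOException;

    final class FlushProcedureRetrySketch {
        /** Stand-in for the remote "flush this region" call carried by FlushRegionCallable. */
        interface RegionFlushService {
            void flushRegion(String encodedRegionName) throws IOException;
        }

        /** Keeps re-dispatching the flush request until the region server accepts it. */
        static void runUntilAccepted(RegionFlushService rs, String encodedRegionName, long retryDelayMs)
                throws InterruptedException {
            while (true) {
                try {
                    rs.flushRegion(encodedRegionName); // "Executing remote procedure ... FlushRegionCallable"
                    return;                            // accepted; the procedure can complete
                } catch (IOException e) {
                    // "Unable to complete flush ... as already flushing" is reported to the master,
                    // which waits and dispatches the same pid again.
                    Thread.sleep(retryDelayMs);
                }
            }
        }
    }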
2024-12-03T15:21:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c71189ac12394c2a96ec10bea1c633b2 2024-12-03T15:21:40,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:40,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:40,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:40,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
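[editor's note] The 512.0 K figure in every RegionTooBusyException is the blocking memstore size, i.e. the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier. The excerpt does not show which values this run actually sets, so the sketch below only chooses numbers whose product matches the logged limit.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class SmallMemstoreConfSketch {
        /** Builds a configuration whose blocking limit (flush size x multiplier) is 512 KB. */
        static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // flush at 128 KB
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x = 512 KB
            return conf;
        }
    }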
2024-12-03T15:21:40,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/aac8bfadf3cc416fa55ba2ba227fe226 is 50, key is test_row_0/C:col10/1733239299225/Put/seqid=0 2024-12-03T15:21:40,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742138_1314 (size=12301) 2024-12-03T15:21:40,371 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:40,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239360400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239360410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239360411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239360411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,539 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:40,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/aac8bfadf3cc416fa55ba2ba227fe226 2024-12-03T15:21:40,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/7f0d64c5bd9840e1a1f35e23aeb51823 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823 2024-12-03T15:21:40,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823, entries=200, sequenceid=308, filesize=14.4 K 2024-12-03T15:21:40,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/c71189ac12394c2a96ec10bea1c633b2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2 2024-12-03T15:21:40,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2, entries=150, sequenceid=308, filesize=12.0 K 2024-12-03T15:21:40,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/aac8bfadf3cc416fa55ba2ba227fe226 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226 2024-12-03T15:21:40,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226, entries=150, sequenceid=308, filesize=12.0 K 2024-12-03T15:21:40,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1443ms, sequenceid=308, compaction requested=false 2024-12-03T15:21:40,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:40,696 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-03T15:21:40,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:40,698 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:21:40,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:40,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:40,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:40,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
as already flushing 2024-12-03T15:21:40,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/d95b1ae563624b128d880fd0a5f4d0fa is 50, key is test_row_0/A:col10/1733239299250/Put/seqid=0 2024-12-03T15:21:40,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742139_1315 (size=12301) 2024-12-03T15:21:40,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239360776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:40,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239360880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:40,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:41,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239361098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,158 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/d95b1ae563624b128d880fd0a5f4d0fa 2024-12-03T15:21:41,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8caae91492bb41038b7384db3d98de99 is 50, key is test_row_0/B:col10/1733239299250/Put/seqid=0 2024-12-03T15:21:41,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742140_1316 (size=12301) 2024-12-03T15:21:41,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239361406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239361411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239361415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239361422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239361426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,606 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8caae91492bb41038b7384db3d98de99 2024-12-03T15:21:41,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6e0374c6cf4aa79b879c19dd77a1ae is 50, key is test_row_0/C:col10/1733239299250/Put/seqid=0 2024-12-03T15:21:41,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742141_1317 (size=12301) 2024-12-03T15:21:41,639 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=324 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6e0374c6cf4aa79b879c19dd77a1ae 2024-12-03T15:21:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/d95b1ae563624b128d880fd0a5f4d0fa as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa 2024-12-03T15:21:41,658 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa, entries=150, sequenceid=324, filesize=12.0 K 2024-12-03T15:21:41,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/8caae91492bb41038b7384db3d98de99 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99 2024-12-03T15:21:41,666 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99, entries=150, sequenceid=324, filesize=12.0 K 2024-12-03T15:21:41,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6e0374c6cf4aa79b879c19dd77a1ae as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae 2024-12-03T15:21:41,672 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae, entries=150, sequenceid=324, filesize=12.0 K 2024-12-03T15:21:41,673 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5a9eb34ef535e1571d4c28ffefa7e658 in 975ms, sequenceid=324, compaction requested=true 2024-12-03T15:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
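
The repeated RegionTooBusyException records above come from HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit; the writers back off and retry until the flush (here finishing at sequenceid=324 with "compaction requested=true") drains the memstore. Below is a minimal client-side sketch of that retry pattern, illustrative only and not code from this test: the table, row, family and qualifier names are taken from the log, while the attempt count and backoff values are assumptions, and in practice the HBase client also retries this exception internally before surfacing it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same shape of write as the test rows seen in the log: test_row_0, family A, qualifier col10.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                       // assumed starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                         // may be rejected while the memstore is over its blocking limit
              break;                                  // write accepted
            } catch (RegionTooBusyException e) {
              // Region server refused the write; wait and retry with exponential backoff.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

The 512.0 K threshold itself is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so raising either setting raises the point at which writes start being rejected, at the cost of larger flushes.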
2024-12-03T15:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-03T15:21:41,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-03T15:21:41,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-03T15:21:41,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8590 sec 2024-12-03T15:21:41,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 2.8670 sec 2024-12-03T15:21:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:41,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:41,917 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:41,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/dd2089bb529e4c0494bb50387768af25 is 50, key is test_row_0/A:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:41,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239361947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:41,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742142_1318 (size=12301) 2024-12-03T15:21:42,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239362052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:42,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:42,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239362257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:42,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/dd2089bb529e4c0494bb50387768af25 2024-12-03T15:21:42,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0ef6300c618141ad80f56620b81988ea is 50, key is test_row_0/B:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:42,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742143_1319 (size=12301) 2024-12-03T15:21:42,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:42,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239362562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:42,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0ef6300c618141ad80f56620b81988ea 2024-12-03T15:21:42,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/04e5891c3d9e405c9ca1155d964317fd is 50, key is test_row_0/C:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742144_1320 (size=12301) 2024-12-03T15:21:42,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=348 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/04e5891c3d9e405c9ca1155d964317fd 2024-12-03T15:21:42,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/dd2089bb529e4c0494bb50387768af25 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25 2024-12-03T15:21:42,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25, entries=150, sequenceid=348, filesize=12.0 K 2024-12-03T15:21:42,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/0ef6300c618141ad80f56620b81988ea as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea 2024-12-03T15:21:42,881 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea, entries=150, sequenceid=348, filesize=12.0 K 2024-12-03T15:21:42,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/04e5891c3d9e405c9ca1155d964317fd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd 2024-12-03T15:21:42,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd, entries=150, sequenceid=348, filesize=12.0 K 2024-12-03T15:21:42,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 5a9eb34ef535e1571d4c28ffefa7e658 in 973ms, sequenceid=348, compaction requested=true 2024-12-03T15:21:42,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:42,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:42,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:42,889 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:42,889 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:42,892 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:42,892 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 
5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:42,892 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:42,892 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1e15f607d8894ca7bc806451546abe64, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=51.1 K 2024-12-03T15:21:42,892 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:42,893 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:42,893 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
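The "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" and ExploringCompactionPolicy entries above show the ratio-based selection picking up the four flush files in each store; the "16 blocking" figure corresponds to hbase.hstore.blockingStoreFiles. A minimal, illustrative Java sketch of the configuration keys involved follows; the values shown are the common defaults, not necessarily the settings this test run uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used when scoring candidate file permutations ("3 permutations with 3 in ratio" above).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Updates to the region are delayed once any store reaches this many files ("16 blocking" above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}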
2024-12-03T15:21:42,893 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/cd5b73b9b02f47229316bf16877ad6c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=48.7 K 2024-12-03T15:21:42,895 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e15f607d8894ca7bc806451546abe64, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:42,895 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cd5b73b9b02f47229316bf16877ad6c4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:42,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f0d64c5bd9840e1a1f35e23aeb51823, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733239298612 2024-12-03T15:21:42,896 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c71189ac12394c2a96ec10bea1c633b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733239298613 2024-12-03T15:21:42,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting d95b1ae563624b128d880fd0a5f4d0fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733239299250 2024-12-03T15:21:42,896 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8caae91492bb41038b7384db3d98de99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733239299250 2024-12-03T15:21:42,897 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ef6300c618141ad80f56620b81988ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:42,897 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd2089bb529e4c0494bb50387768af25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-03T15:21:42,932 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-03T15:21:42,932 INFO 
[RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#270 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:42,933 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/689d1932aaef49c2baa3c0fecd669d0a is 50, key is test_row_0/A:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:42,933 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:42,935 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d8fa0eecbc5e4e1bb02f96dcac6228bb is 50, key is test_row_0/B:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:42,938 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:42,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-03T15:21:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:42,951 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:42,952 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:42,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:42,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742145_1321 (size=13085) 2024-12-03T15:21:42,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742146_1322 (size=13085) 2024-12-03T15:21:42,976 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d8fa0eecbc5e4e1bb02f96dcac6228bb as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8fa0eecbc5e4e1bb02f96dcac6228bb 2024-12-03T15:21:42,986 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into d8fa0eecbc5e4e1bb02f96dcac6228bb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:42,986 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:42,986 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=12, startTime=1733239302889; duration=0sec 2024-12-03T15:21:42,986 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:42,986 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:42,986 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:42,988 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:42,988 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:42,988 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
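The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request logged above is what creates FlushTableProcedure pid=77 (the earlier one, pid=75, is reported completed by HBaseAdmin$TableFuture). A short, hedged sketch of issuing the same flush from application code through the public Admin API, with the connection settings assumed for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master runs a FlushTableProcedure (like pid=77 above) and the client-side
      // future waits for it, matching the "Operation: FLUSH ... procId: 75 completed" entry.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}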
2024-12-03T15:21:42,988 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/4f2af9d5f10d42bb8c28bf6646bddc2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=48.7 K 2024-12-03T15:21:42,989 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f2af9d5f10d42bb8c28bf6646bddc2e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1733239298263 2024-12-03T15:21:42,990 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting aac8bfadf3cc416fa55ba2ba227fe226, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733239298613 2024-12-03T15:21:42,991 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e6e0374c6cf4aa79b879c19dd77a1ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=324, earliestPutTs=1733239299250 2024-12-03T15:21:42,998 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 04e5891c3d9e405c9ca1155d964317fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:43,016 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#272 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:43,017 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/c745367f62424858abf7bd0dc5fd70c7 is 50, key is test_row_0/C:col10/1733239300768/Put/seqid=0 2024-12-03T15:21:43,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:43,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742147_1323 (size=13085) 2024-12-03T15:21:43,084 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/c745367f62424858abf7bd0dc5fd70c7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c745367f62424858abf7bd0dc5fd70c7 2024-12-03T15:21:43,092 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into c745367f62424858abf7bd0dc5fd70c7(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:43,092 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:43,092 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=12, startTime=1733239302890; duration=0sec 2024-12-03T15:21:43,092 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:43,092 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:43,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:21:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:43,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:43,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:43,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63faef4983f84765979697dafe783809 is 50, key is test_row_0/A:col10/1733239301940/Put/seqid=0 2024-12-03T15:21:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742148_1324 (size=12301) 2024-12-03T15:21:43,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239363204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:43,258 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239363309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,360 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/689d1932aaef49c2baa3c0fecd669d0a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/689d1932aaef49c2baa3c0fecd669d0a 2024-12-03T15:21:43,368 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into 689d1932aaef49c2baa3c0fecd669d0a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:43,368 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:43,368 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=12, startTime=1733239302889; duration=0sec 2024-12-03T15:21:43,368 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:43,368 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:43,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
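The repeated RegionTooBusyException above comes from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking size, which is the configured flush size multiplied by the block multiplier; the "Over memstore limit=512.0 K" figure is that product, presumably because the test lowers these settings well below their defaults. A hedged configuration sketch of the two keys involved, using the usual default values rather than the test's:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a flush of the region is requested (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore grows past
    // flush.size * block.multiplier, the limit reported as "Over memstore limit=..." above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit (bytes) = " + blockingLimit);
  }
}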
2024-12-03T15:21:43,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
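RegionTooBusyException is retryable: the RpcRetryingCallerImpl entries further down ("tries=6, retries=16, started=4179 ms ago") show the HBase client already backing off and retrying on the AcidGuaranteesTestTool writer threads. As an illustrative, application-level equivalent, a minimal sketch of a put with exponential backoff is shown below; the retry counts and pause are assumptions, and in practice the failure usually surfaces as a RetriesExhaustedException wrapping the RegionTooBusyException once the client's own retries run out.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the keys seen in the log (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // the client itself also retries (hbase.client.retries.number)
          break;
        } catch (IOException e) {
          // Typically a (wrapped) RegionTooBusyException while the region is over its
          // memstore limit; back off and let flushes and compactions catch up.
          Thread.sleep(pauseMs);
          pauseMs *= 2;
        }
      }
    }
  }
}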
2024-12-03T15:21:43,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239363429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239363430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,432 DEBUG [Thread-1161 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:43,433 DEBUG [Thread-1157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:43,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239363436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,441 DEBUG [Thread-1155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:43,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239363450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,454 DEBUG [Thread-1163 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:43,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239363522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63faef4983f84765979697dafe783809 2024-12-03T15:21:43,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:43,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
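The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (512.0 K in this run). As a rough illustration only, the following Java sketch shows how that blocking limit is typically derived from configuration; the 128 KB flush size and the multiplier of 4 are assumed values chosen to reproduce a 512 KB limit for this small-memstore test and are not read from the log itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold (HBase default is 128 MB; 128 KB is assumed here).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 512 KB with the assumed values above
    System.out.println("Blocking memstore limit = " + blockingLimit + " bytes");
  }
}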
2024-12-03T15:21:43,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,583 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d9a284608bc4494f8ac873d092c86a86 is 50, key is test_row_0/B:col10/1733239301940/Put/seqid=0 2024-12-03T15:21:43,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742149_1325 (size=12301) 2024-12-03T15:21:43,727 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
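The pid=78 failures above ("Unable to complete flush ... as already flushing") show a master-driven flush procedure racing with a flush the MemStoreFlusher already has in progress: the region server rejects the FlushRegionCallable and the master keeps re-dispatching it. For context, a minimal sketch of requesting such a flush through the client Admin API follows; the connection setup is an assumption for illustration and this is not the test tool's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure against the table's regions.
      // If a region is already flushing, the region server fails the callable
      // and the master retries it, which is what the pid=78 entries record.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}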
2024-12-03T15:21:43,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:43,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239363826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,886 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:43,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:43,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:43,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:43,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
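On the client side, the "Call exception, tries=..., retries=16" DEBUG lines show RpcRetryingCallerImpl retrying each put while the region stays over its memstore limit. The hypothetical writer below only illustrates the knobs involved; the retry count and pause are assumed values, not settings read from this test, and the row, family, and qualifier simply mirror names that appear in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // assumed retry budget, echoing "retries=16" above
    conf.setLong("hbase.client.pause", 100L);       // assumed base retry pause in milliseconds

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks through the client retry loop; if the region remains over its
      // memstore limit past the retry budget, RegionTooBusyException surfaces here.
      table.put(put);
    }
  }
}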
2024-12-03T15:21:43,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,006 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d9a284608bc4494f8ac873d092c86a86 2024-12-03T15:21:44,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/e186d8538baa4d5b9914eca757120dc8 is 50, key is test_row_0/C:col10/1733239301940/Put/seqid=0 2024-12-03T15:21:44,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742150_1326 (size=12301) 2024-12-03T15:21:44,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:44,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:44,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:44,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,046 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:44,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:44,204 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:44,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:44,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:44,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:44,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239364336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:44,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:44,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:44,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:44,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:44,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:44,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/e186d8538baa4d5b9914eca757120dc8 2024-12-03T15:21:44,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/63faef4983f84765979697dafe783809 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809 2024-12-03T15:21:44,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809, entries=150, sequenceid=363, filesize=12.0 K 2024-12-03T15:21:44,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d9a284608bc4494f8ac873d092c86a86 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86 2024-12-03T15:21:44,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86, entries=150, 
sequenceid=363, filesize=12.0 K 2024-12-03T15:21:44,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/e186d8538baa4d5b9914eca757120dc8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8 2024-12-03T15:21:44,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8, entries=150, sequenceid=363, filesize=12.0 K 2024-12-03T15:21:44,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1366ms, sequenceid=363, compaction requested=false 2024-12-03T15:21:44,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:44,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:44,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-03T15:21:44,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:44,530 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:21:44,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:44,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:44,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:44,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:44,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:44,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:44,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/c248963abecb4ce0b2c1fbd527a52b27 is 50, key is test_row_0/A:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:44,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742151_1327 (size=12301) 2024-12-03T15:21:44,990 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/c248963abecb4ce0b2c1fbd527a52b27 2024-12-03T15:21:45,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/2747a3790a074a03bbe9c4700248d951 is 50, key is test_row_0/B:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:45,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:45,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742152_1328 (size=12301) 2024-12-03T15:21:45,074 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=387 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/2747a3790a074a03bbe9c4700248d951 2024-12-03T15:21:45,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7c32abe52fdd4fc7bf8b777c5599d18a is 50, key is test_row_0/C:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:45,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742153_1329 (size=12301) 2024-12-03T15:21:45,129 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7c32abe52fdd4fc7bf8b777c5599d18a 2024-12-03T15:21:45,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/c248963abecb4ce0b2c1fbd527a52b27 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27 2024-12-03T15:21:45,159 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27, entries=150, sequenceid=387, filesize=12.0 K 2024-12-03T15:21:45,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/2747a3790a074a03bbe9c4700248d951 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951 2024-12-03T15:21:45,166 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951, entries=150, sequenceid=387, filesize=12.0 K 2024-12-03T15:21:45,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/7c32abe52fdd4fc7bf8b777c5599d18a as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a 2024-12-03T15:21:45,174 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a, entries=150, sequenceid=387, filesize=12.0 K 2024-12-03T15:21:45,175 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 5a9eb34ef535e1571d4c28ffefa7e658 in 645ms, sequenceid=387, compaction requested=true 2024-12-03T15:21:45,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:45,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:45,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-03T15:21:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-03T15:21:45,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-03T15:21:45,191 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2320 sec 2024-12-03T15:21:45,193 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.2540 sec 2024-12-03T15:21:45,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:45,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:21:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:45,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:45,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:45,383 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4ea151acc80441f984b40a805bc65e92 is 50, key is test_row_1/A:col10/1733239305365/Put/seqid=0 2024-12-03T15:21:45,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742154_1330 (size=9857) 2024-12-03T15:21:45,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4ea151acc80441f984b40a805bc65e92 2024-12-03T15:21:45,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4b980548a3074c8ebf8063c7d9703693 is 50, key is test_row_1/B:col10/1733239305365/Put/seqid=0 2024-12-03T15:21:45,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742155_1331 (size=9857) 2024-12-03T15:21:45,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:45,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239365585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:45,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:45,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239365688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:45,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4b980548a3074c8ebf8063c7d9703693 2024-12-03T15:21:45,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/94ceb887bc88445d8751a8e1d2c1c83a is 50, key is test_row_1/C:col10/1733239305365/Put/seqid=0 2024-12-03T15:21:45,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:45,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239365898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:45,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742156_1332 (size=9857) 2024-12-03T15:21:46,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239366203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:46,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/94ceb887bc88445d8751a8e1d2c1c83a 2024-12-03T15:21:46,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/4ea151acc80441f984b40a805bc65e92 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92 2024-12-03T15:21:46,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92, entries=100, sequenceid=398, filesize=9.6 K 2024-12-03T15:21:46,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4b980548a3074c8ebf8063c7d9703693 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693 2024-12-03T15:21:46,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693, entries=100, sequenceid=398, filesize=9.6 K 2024-12-03T15:21:46,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/94ceb887bc88445d8751a8e1d2c1c83a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a 2024-12-03T15:21:46,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a, entries=100, sequenceid=398, filesize=9.6 K 2024-12-03T15:21:46,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1028ms, sequenceid=398, compaction requested=true 2024-12-03T15:21:46,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:46,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:21:46,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:46,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:21:46,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:21:46,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 5a9eb34ef535e1571d4c28ffefa7e658:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:21:46,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-03T15:21:46,395 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:46,396 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:46,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:46,400 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/A is initiating minor compaction (all files) 2024-12-03T15:21:46,400 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/A in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:46,400 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/689d1932aaef49c2baa3c0fecd669d0a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=46.4 K 2024-12-03T15:21:46,401 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 689d1932aaef49c2baa3c0fecd669d0a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:46,402 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 63faef4983f84765979697dafe783809, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733239301940 2024-12-03T15:21:46,403 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c248963abecb4ce0b2c1fbd527a52b27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733239303194 2024-12-03T15:21:46,404 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ea151acc80441f984b40a805bc65e92, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733239305362 2024-12-03T15:21:46,404 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:46,404 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/C is initiating minor compaction (all files) 2024-12-03T15:21:46,404 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/C in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:46,404 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c745367f62424858abf7bd0dc5fd70c7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=46.4 K 2024-12-03T15:21:46,406 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c745367f62424858abf7bd0dc5fd70c7, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:46,406 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e186d8538baa4d5b9914eca757120dc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733239301940 2024-12-03T15:21:46,407 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c32abe52fdd4fc7bf8b777c5599d18a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733239303194 2024-12-03T15:21:46,407 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94ceb887bc88445d8751a8e1d2c1c83a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733239305362 2024-12-03T15:21:46,456 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#C#compaction#282 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:46,457 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/8e65937644c34ecca2cb826a84c7fff8 is 50, key is test_row_0/C:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:46,459 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#A#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:46,460 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1470d59d043343b380b50ba015904147 is 50, key is test_row_0/A:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:46,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742158_1334 (size=13221) 2024-12-03T15:21:46,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742157_1333 (size=13221) 2024-12-03T15:21:46,495 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/1470d59d043343b380b50ba015904147 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1470d59d043343b380b50ba015904147 2024-12-03T15:21:46,516 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/A of 5a9eb34ef535e1571d4c28ffefa7e658 into 1470d59d043343b380b50ba015904147(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:46,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:46,516 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/A, priority=12, startTime=1733239306394; duration=0sec 2024-12-03T15:21:46,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:21:46,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:A 2024-12-03T15:21:46,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:21:46,518 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:21:46,518 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 5a9eb34ef535e1571d4c28ffefa7e658/B is initiating minor compaction (all files) 2024-12-03T15:21:46,518 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 5a9eb34ef535e1571d4c28ffefa7e658/B in TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:46,518 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8fa0eecbc5e4e1bb02f96dcac6228bb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp, totalSize=46.4 K 2024-12-03T15:21:46,519 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d8fa0eecbc5e4e1bb02f96dcac6228bb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=348, earliestPutTs=1733239300768 2024-12-03T15:21:46,519 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d9a284608bc4494f8ac873d092c86a86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733239301940 2024-12-03T15:21:46,520 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2747a3790a074a03bbe9c4700248d951, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733239303194 2024-12-03T15:21:46,520 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b980548a3074c8ebf8063c7d9703693, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1733239305362 2024-12-03T15:21:46,529 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 5a9eb34ef535e1571d4c28ffefa7e658#B#compaction#284 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:21:46,529 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/45962586f22242479d3667daeac47443 is 50, key is test_row_0/B:col10/1733239303194/Put/seqid=0 2024-12-03T15:21:46,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742159_1335 (size=13221) 2024-12-03T15:21:46,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:46,709 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:21:46,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:46,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:46,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:46,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:46,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:46,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:46,720 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/83fa8d8f7e964c93bf1b02712f50e3bc is 50, key is test_row_0/A:col10/1733239306707/Put/seqid=0 2024-12-03T15:21:46,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:46,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239366737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:46,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742160_1336 (size=12301) 2024-12-03T15:21:46,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/83fa8d8f7e964c93bf1b02712f50e3bc 2024-12-03T15:21:46,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/87095c608543433d9ffe87378d6fe911 is 50, key is test_row_0/B:col10/1733239306707/Put/seqid=0 2024-12-03T15:21:46,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742161_1337 (size=12301) 2024-12-03T15:21:46,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:46,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239366842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:46,886 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/8e65937644c34ecca2cb826a84c7fff8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8e65937644c34ecca2cb826a84c7fff8 2024-12-03T15:21:46,901 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/C of 5a9eb34ef535e1571d4c28ffefa7e658 into 8e65937644c34ecca2cb826a84c7fff8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:46,901 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:46,901 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/C, priority=12, startTime=1733239306395; duration=0sec 2024-12-03T15:21:46,901 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:46,901 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:C 2024-12-03T15:21:46,956 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/45962586f22242479d3667daeac47443 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/45962586f22242479d3667daeac47443 2024-12-03T15:21:46,964 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 5a9eb34ef535e1571d4c28ffefa7e658/B of 5a9eb34ef535e1571d4c28ffefa7e658 into 45962586f22242479d3667daeac47443(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:21:46,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:46,964 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., storeName=5a9eb34ef535e1571d4c28ffefa7e658/B, priority=12, startTime=1733239306394; duration=0sec 2024-12-03T15:21:46,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:21:46,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 5a9eb34ef535e1571d4c28ffefa7e658:B 2024-12-03T15:21:47,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239367046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-03T15:21:47,056 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-03T15:21:47,057 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:21:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-03T15:21:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:47,059 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:21:47,059 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:21:47,059 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:21:47,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:47,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/87095c608543433d9ffe87378d6fe911 2024-12-03T15:21:47,211 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 
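The records above show the flush being driven from the client side: HBaseAdmin reports the earlier flush (procId 77) complete, the master stores FlushTableProcedure pid=79 for TestAcidGuarantees, and a FlushRegionCallable subprocedure (pid=80) is dispatched to 2b5ef621a0dd,46815. A minimal sketch of how such a flush is requested through the Admin API; the connection setup and class name here are assumptions for illustration, not taken from the test code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: ask the master to flush every region of a table. The master turns this request into
// the FlushTableProcedure / FlushRegionProcedure pair (pid=79 / pid=80) seen in the log above.
public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```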
2024-12-03T15:21:47,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:47,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
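The callable immediately fails with "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing out the 154.31 KB snapshot of 5a9eb34ef535e1571d4c28ffefa7e658; the master records the failure and, as the later 15:21:47,365 and 15:21:47,520 entries show, dispatches pid=80 again. Writers meanwhile see RegionTooBusyException on their puts. Below is a rough, hypothetical writer-side sketch against the same table and column family; the explicit backoff loop is an assumption (the test itself relies on the client's built-in retries), and in practice the exception may surface wrapped after those retries are exhausted.

```java
import java.io.IOException;

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer loop mirroring the rows/families in the log above.
// The retry/backoff policy is illustrative only.
public class BlockedRegionWriter {
  static void putWithBackoff(Connection conn, int maxAttempts) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; give the in-flight flush time to complete.
          if (attempt >= maxAttempts) {
            throw e;
          }
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}
```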
2024-12-03T15:21:47,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/058530edafcc4ae9874c90b6e6a94d54 is 50, key is test_row_0/C:col10/1733239306707/Put/seqid=0 2024-12-03T15:21:47,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742162_1338 (size=12301) 2024-12-03T15:21:47,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52942 deadline: 1733239367350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:47,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-03T15:21:47,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:47,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52960 deadline: 1733239367444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,446 DEBUG [Thread-1157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:47,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52976 deadline: 1733239367471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53006 deadline: 1733239367471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,473 DEBUG [Thread-1161 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:47,473 DEBUG [Thread-1155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8187 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:47,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:21:47,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52954 deadline: 1733239367486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,488 DEBUG [Thread-1163 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:21:47,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-03T15:21:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
2024-12-03T15:21:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:21:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:21:47,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
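The RegionTooBusyException above is retried inside the HBase client itself (RpcRetryingCallerImpl, tries=7 of retries=16); the writer thread in AcidGuaranteesTestTool simply issues single-row puts and only sees an exception once the retry budget is exhausted. A minimal sketch of that call pattern, assuming the standard HBase 2.x client API — the table, row and column-family names are copied from the log, while the value and the retry settings are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetriedPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Retry budget comparable to what the log shows (retries=16).
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base pause in ms; backoff is applied on top

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException (memstore over limit) is retried internally;
          // an IOException only reaches this point after all retries fail.
          table.put(put);
        }
      }
    }

Because the retries are internal, the "Region is too busy" warnings on the server and the "Call exception, tries=..." messages on the client can repeat many times before the put either succeeds (once a flush frees memstore space) or fails back to the caller.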
2024-12-03T15:21:47,535 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3677bd4f to 127.0.0.1:60989 2024-12-03T15:21:47,535 DEBUG [Thread-1168 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:60989 2024-12-03T15:21:47,535 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:47,535 DEBUG [Thread-1168 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:47,536 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:60989 2024-12-03T15:21:47,536 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:47,538 DEBUG [Thread-1166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:60989 2024-12-03T15:21:47,538 DEBUG [Thread-1166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:47,538 DEBUG [Thread-1170 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:60989 2024-12-03T15:21:47,538 DEBUG [Thread-1170 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:47,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/058530edafcc4ae9874c90b6e6a94d54 2024-12-03T15:21:47,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/83fa8d8f7e964c93bf1b02712f50e3bc as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/83fa8d8f7e964c93bf1b02712f50e3bc 2024-12-03T15:21:47,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/83fa8d8f7e964c93bf1b02712f50e3bc, entries=150, sequenceid=425, filesize=12.0 K 2024-12-03T15:21:47,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/87095c608543433d9ffe87378d6fe911 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/87095c608543433d9ffe87378d6fe911 2024-12-03T15:21:47,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/87095c608543433d9ffe87378d6fe911, entries=150, sequenceid=425, filesize=12.0 K 2024-12-03T15:21:47,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/058530edafcc4ae9874c90b6e6a94d54 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/058530edafcc4ae9874c90b6e6a94d54 2024-12-03T15:21:47,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:47,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/058530edafcc4ae9874c90b6e6a94d54, entries=150, sequenceid=425, filesize=12.0 K 2024-12-03T15:21:47,667 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 5a9eb34ef535e1571d4c28ffefa7e658 in 958ms, sequenceid=425, compaction requested=false 2024-12-03T15:21:47,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:47,675 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:47,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-03T15:21:47,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:47,677 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:47,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:47,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/093642b9ad974fff89e188a2e9906591 is 50, key is test_row_0/A:col10/1733239306710/Put/seqid=0 2024-12-03T15:21:47,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742163_1339 (size=12301) 2024-12-03T15:21:47,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:47,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. as already flushing 2024-12-03T15:21:47,855 DEBUG [Thread-1159 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:60989 2024-12-03T15:21:47,855 DEBUG [Thread-1159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:48,121 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/093642b9ad974fff89e188a2e9906591 2024-12-03T15:21:48,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4aa7e4c1e19b4ef59aebf56c63d78da0 is 50, key is test_row_0/B:col10/1733239306710/Put/seqid=0 2024-12-03T15:21:48,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:48,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742164_1340 (size=12301) 2024-12-03T15:21:48,597 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4aa7e4c1e19b4ef59aebf56c63d78da0 2024-12-03T15:21:48,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6b3c4156d04546af4275c4d856014c is 50, key is test_row_0/C:col10/1733239306710/Put/seqid=0 2024-12-03T15:21:48,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742165_1341 (size=12301) 2024-12-03T15:21:49,007 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=437 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6b3c4156d04546af4275c4d856014c 2024-12-03T15:21:49,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/093642b9ad974fff89e188a2e9906591 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/093642b9ad974fff89e188a2e9906591 2024-12-03T15:21:49,015 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/093642b9ad974fff89e188a2e9906591, entries=150, sequenceid=437, filesize=12.0 K 2024-12-03T15:21:49,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/4aa7e4c1e19b4ef59aebf56c63d78da0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4aa7e4c1e19b4ef59aebf56c63d78da0 2024-12-03T15:21:49,019 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4aa7e4c1e19b4ef59aebf56c63d78da0, entries=150, sequenceid=437, filesize=12.0 K 2024-12-03T15:21:49,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/2e6b3c4156d04546af4275c4d856014c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6b3c4156d04546af4275c4d856014c 2024-12-03T15:21:49,022 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6b3c4156d04546af4275c4d856014c, entries=150, sequenceid=437, filesize=12.0 K 2024-12-03T15:21:49,023 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=6.71 KB/6870 for 5a9eb34ef535e1571d4c28ffefa7e658 in 1346ms, sequenceid=437, compaction requested=true 2024-12-03T15:21:49,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 5a9eb34ef535e1571d4c28ffefa7e658: 
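The flush just completed under pid=80 is the server-side half of a client-requested table flush; the HBaseAdmin future for the parent procedure (procId=79) is reported as completed a few lines below. A minimal sketch of the admin call that starts such a chain, assuming the standard HBase 2.x Admin API (only the table name is taken from the log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits the table flush to the master and waits for it to finish; in this
          // log that shows up as FlushTableProcedure (pid=79) dispatching a
          // FlushRegionProcedure (pid=80) to the region server.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }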
2024-12-03T15:21:49,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:49,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-03T15:21:49,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-03T15:21:49,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-03T15:21:49,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9650 sec 2024-12-03T15:21:49,026 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.9680 sec 2024-12-03T15:21:49,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-03T15:21:49,183 INFO [Thread-1165 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-03T15:21:54,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T15:21:57,464 DEBUG [Thread-1157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:60989 2024-12-03T15:21:57,464 DEBUG [Thread-1157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:57,507 DEBUG [Thread-1161 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:60989 2024-12-03T15:21:57,507 DEBUG [Thread-1161 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:57,510 DEBUG [Thread-1163 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:60989 2024-12-03T15:21:57,510 DEBUG [Thread-1163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:57,516 DEBUG [Thread-1155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:60989 2024-12-03T15:21:57,516 DEBUG [Thread-1155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 128 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4427 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4292 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4271 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4391 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4370 2024-12-03T15:21:57,516 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-03T15:21:57,516 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T15:21:57,516 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:60989 2024-12-03T15:21:57,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:21:57,517 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-03T15:21:57,517 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-03T15:21:57,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:57,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-03T15:21:57,521 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239317521"}]},"ts":"1733239317521"} 2024-12-03T15:21:57,522 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-03T15:21:57,525 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-03T15:21:57,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:21:57,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, UNASSIGN}] 2024-12-03T15:21:57,528 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, UNASSIGN 2024-12-03T15:21:57,529 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=83 updating hbase:meta row=5a9eb34ef535e1571d4c28ffefa7e658, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:57,530 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:21:57,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; CloseRegionProcedure 5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:21:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-03T15:21:57,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:57,681 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(124): Close 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1681): Closing 5a9eb34ef535e1571d4c28ffefa7e658, disabling compactions & flushes 2024-12-03T15:21:57,682 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. after waiting 0 ms 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 
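The unassign and close sequence above is the execution of the DisableTableProcedure stored as pid=81 ("Client=jenkins//172.17.0.2 disable TestAcidGuarantees"). A minimal sketch of the corresponding client-side call, assuming the standard HBase 2.x Admin API; the table name comes from the log and the verification step is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drives the table through DISABLING -> DISABLED; each region is flushed,
          // closed and its compacted store files archived, as the log records below.
          admin.disableTable(table);
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }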
2024-12-03T15:21:57,682 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(2837): Flushing 5a9eb34ef535e1571d4c28ffefa7e658 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=A 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=B 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:57,682 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 5a9eb34ef535e1571d4c28ffefa7e658, store=C 2024-12-03T15:21:57,683 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:21:57,687 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/0a9f27b21aec4678ae802f02b470f6e8 is 50, key is test_row_0/A:col10/1733239307854/Put/seqid=0 2024-12-03T15:21:57,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742166_1342 (size=12301) 2024-12-03T15:21:57,697 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/0a9f27b21aec4678ae802f02b470f6e8 2024-12-03T15:21:57,707 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d0d282115385470a8fc271f5f577bc28 is 50, key is test_row_0/B:col10/1733239307854/Put/seqid=0 2024-12-03T15:21:57,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742167_1343 (size=12301) 2024-12-03T15:21:57,726 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d0d282115385470a8fc271f5f577bc28 
2024-12-03T15:21:57,733 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/b27416a18f11424ab98c38b8517df857 is 50, key is test_row_0/C:col10/1733239307854/Put/seqid=0 2024-12-03T15:21:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742168_1344 (size=12301) 2024-12-03T15:21:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-03T15:21:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-03T15:21:58,168 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/b27416a18f11424ab98c38b8517df857 2024-12-03T15:21:58,173 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/A/0a9f27b21aec4678ae802f02b470f6e8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/0a9f27b21aec4678ae802f02b470f6e8 2024-12-03T15:21:58,185 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/0a9f27b21aec4678ae802f02b470f6e8, entries=150, sequenceid=445, filesize=12.0 K 2024-12-03T15:21:58,186 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/B/d0d282115385470a8fc271f5f577bc28 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d0d282115385470a8fc271f5f577bc28 2024-12-03T15:21:58,191 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d0d282115385470a8fc271f5f577bc28, entries=150, sequenceid=445, filesize=12.0 K 2024-12-03T15:21:58,192 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/.tmp/C/b27416a18f11424ab98c38b8517df857 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/b27416a18f11424ab98c38b8517df857 2024-12-03T15:21:58,197 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/b27416a18f11424ab98c38b8517df857, entries=150, sequenceid=445, filesize=12.0 K 2024-12-03T15:21:58,198 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 5a9eb34ef535e1571d4c28ffefa7e658 in 515ms, sequenceid=445, compaction requested=true 2024-12-03T15:21:58,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/266d781ba5594494881952a0fc125952, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4e10b0eea8b94ffaa65686162370f3c3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d38729c8dde5451e9d2efe9b024c0bc2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/fbd359b8c328419ea78513a5d4c6b545, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1e15f607d8894ca7bc806451546abe64, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/689d1932aaef49c2baa3c0fecd669d0a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92] to archive 2024-12-03T15:21:58,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:21:58,217 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1a0a0d3fea0843b59b9a5e1d59af0bf5 2024-12-03T15:21:58,219 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63bca4b61cf24b638e783800a4af92e4 2024-12-03T15:21:58,221 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/266d781ba5594494881952a0fc125952 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/266d781ba5594494881952a0fc125952 2024-12-03T15:21:58,226 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7d2de0c5c36b4bab857cd806be11603f 2024-12-03T15:21:58,228 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/30ee988711784d98b78f1c5e9e85b584 2024-12-03T15:21:58,231 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/39b42f13f905402eae5ad933f6469d9a 2024-12-03T15:21:58,236 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4e10b0eea8b94ffaa65686162370f3c3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4e10b0eea8b94ffaa65686162370f3c3 2024-12-03T15:21:58,237 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a2f4745f013b432c8f41e4471d9e9ea0 2024-12-03T15:21:58,246 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/a08744d0beb144ce93f8ae2eb1a496bb 2024-12-03T15:21:58,247 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6ea48d41904747409ea208522961b10f 2024-12-03T15:21:58,249 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d38729c8dde5451e9d2efe9b024c0bc2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d38729c8dde5451e9d2efe9b024c0bc2 2024-12-03T15:21:58,251 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4031f720fb4840e4b75718315f728683 2024-12-03T15:21:58,253 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/ed5b1ff3c1a248feabb9305960c7bbbd 2024-12-03T15:21:58,254 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/2ba216904e2f4486887d836aed0146ab 2024-12-03T15:21:58,256 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/bef950c1f77f4695b2bfd783e3e8d0d3 2024-12-03T15:21:58,260 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/fbd359b8c328419ea78513a5d4c6b545 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/fbd359b8c328419ea78513a5d4c6b545 2024-12-03T15:21:58,261 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f069f7fcb8446cd9849b76390819133 2024-12-03T15:21:58,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/6dfbaa01aceb4ab2a37ae0094686b9f4 2024-12-03T15:21:58,270 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1e15f607d8894ca7bc806451546abe64 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1e15f607d8894ca7bc806451546abe64 2024-12-03T15:21:58,276 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/40658268c45149a09bed674a2d2c2cce 2024-12-03T15:21:58,277 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/7f0d64c5bd9840e1a1f35e23aeb51823 2024-12-03T15:21:58,279 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/d95b1ae563624b128d880fd0a5f4d0fa 2024-12-03T15:21:58,280 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/689d1932aaef49c2baa3c0fecd669d0a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/689d1932aaef49c2baa3c0fecd669d0a 2024-12-03T15:21:58,281 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/dd2089bb529e4c0494bb50387768af25 2024-12-03T15:21:58,284 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/63faef4983f84765979697dafe783809 2024-12-03T15:21:58,285 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/c248963abecb4ce0b2c1fbd527a52b27 2024-12-03T15:21:58,286 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/4ea151acc80441f984b40a805bc65e92 2024-12-03T15:21:58,295 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/42be445b2735415f9ccd1da2ba9970a6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/b4c8987b041c4d83aa4f6b095b1d6a36, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/6989f4f1a17d4ebd8c787db0b1d098cb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8226933f52f4d01893358f4b9537596, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/cd5b73b9b02f47229316bf16877ad6c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8fa0eecbc5e4e1bb02f96dcac6228bb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693] to archive 2024-12-03T15:21:58,297 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
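The HFileArchiver records above all follow a single pattern: each compacted store file under <rootDir>/data/<namespace>/<table>/<region>/<family>/ is renamed to the mirrored location under <rootDir>/archive/data/<namespace>/<table>/<region>/<family>/. The sketch below illustrates only that path mapping using the plain Hadoop FileSystem API; it is not HBase's HFileArchiver implementation, and the class and method names (ArchiveMoveSketch, archiveDestination, archiveStoreFile) are invented for illustration.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {

  // Computes the archive location by re-rooting the file's path relative to
  // <rootDir> under <rootDir>/archive, which is the mapping visible in the log records.
  static Path archiveDestination(Path rootDir, Path storeFile) {
    String root = rootDir.toUri().getPath();
    String file = storeFile.toUri().getPath();
    // e.g. data/default/<table>/<region>/<family>/<file>
    String relative = file.substring(root.length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  // Moves a single store file into the archive, creating the destination directory first.
  static void archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path dest = archiveDestination(rootDir, storeFile);
    fs.mkdirs(dest.getParent());
    if (!fs.rename(storeFile, dest)) {
      throw new IOException("Failed to archive " + storeFile + " to " + dest);
    }
  }

  public static void main(String[] args) throws IOException {
    // Root directory and store file path copied from the log; only the mapping is illustrated here.
    Path rootDir = new Path(
        "hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411");
    Path storeFile = new Path(rootDir,
        "data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde");
    FileSystem fs = rootDir.getFileSystem(new Configuration());
    archiveStoreFile(fs, rootDir, storeFile);
  }
}
```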
2024-12-03T15:21:58,300 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/93cb1323e4a7462daa0d49c2f9bd1cde 2024-12-03T15:21:58,301 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/75895ba83f774f23ad104b68d1f494d1 2024-12-03T15:21:58,303 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/42be445b2735415f9ccd1da2ba9970a6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/42be445b2735415f9ccd1da2ba9970a6 2024-12-03T15:21:58,304 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0928d77034d347f29030f1a9f90d228f 2024-12-03T15:21:58,307 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e650d5dbb1d84586a1ac3a684464a3ea 2024-12-03T15:21:58,309 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/b4c8987b041c4d83aa4f6b095b1d6a36 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/b4c8987b041c4d83aa4f6b095b1d6a36 2024-12-03T15:21:58,310 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/f7103413680549688afdf7349db63d70 2024-12-03T15:21:58,314 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d689bd6c64ec4e05851dd63f3dc38c41 2024-12-03T15:21:58,315 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8de001cb8ef64323975ca6e1f7094a32 2024-12-03T15:21:58,316 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5ae8f2e876584f8c839146af48ac1a5f 2024-12-03T15:21:58,317 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/6989f4f1a17d4ebd8c787db0b1d098cb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/6989f4f1a17d4ebd8c787db0b1d098cb 2024-12-03T15:21:58,318 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8ab82de80e024fd08e8faea1a5f486ad 2024-12-03T15:21:58,322 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0948adb7e1ed4c3383d002cddd4d1ace 2024-12-03T15:21:58,338 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d7452f1a29bc41299218a616465af068 2024-12-03T15:21:58,343 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8226933f52f4d01893358f4b9537596 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8226933f52f4d01893358f4b9537596 2024-12-03T15:21:58,349 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5822671b20e64df490240b4802ed2619 2024-12-03T15:21:58,354 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c00c246a3d0e445f87603e09958e7b52 2024-12-03T15:21:58,359 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/5a2c3e377e4a4a7bb83c9f2ab8568c00 2024-12-03T15:21:58,360 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/cd5b73b9b02f47229316bf16877ad6c4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/cd5b73b9b02f47229316bf16877ad6c4 2024-12-03T15:21:58,361 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/e28c88aa850e425bad43ee2b0c591dea 2024-12-03T15:21:58,362 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/c71189ac12394c2a96ec10bea1c633b2 2024-12-03T15:21:58,364 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/8caae91492bb41038b7384db3d98de99 2024-12-03T15:21:58,365 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8fa0eecbc5e4e1bb02f96dcac6228bb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d8fa0eecbc5e4e1bb02f96dcac6228bb 2024-12-03T15:21:58,372 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/0ef6300c618141ad80f56620b81988ea 2024-12-03T15:21:58,374 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d9a284608bc4494f8ac873d092c86a86 2024-12-03T15:21:58,376 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/2747a3790a074a03bbe9c4700248d951 2024-12-03T15:21:58,377 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4b980548a3074c8ebf8063c7d9703693 2024-12-03T15:21:58,379 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/69d5c71ecc94492e9f9b5a128560c1c3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7f3f9e5b46324609afbc92b84371aa53, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/61cf08edb5444cb7bdad2eaeb6e1b864, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/1cf441354a2447a39bbd3bc424d207c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/4f2af9d5f10d42bb8c28bf6646bddc2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c745367f62424858abf7bd0dc5fd70c7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a] to archive 2024-12-03T15:21:58,380 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
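Once a family's compacted files have been moved (family B above), one plausible spot-check is to list the mirrored archive directory and compare the entries against the file list printed by the HStore(2316) record. The helper below is hypothetical and not part of HBase; it only uses the standard Hadoop FileSystem listing API, with the archive path copied from the log.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveListingCheck {

  public static void main(String[] args) throws IOException {
    // Archive directory for family B of the closing region, copied from the log above.
    Path archivedFamilyDir = new Path(
        "hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411"
            + "/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B");
    FileSystem fs = archivedFamilyDir.getFileSystem(new Configuration());
    // Each entry should correspond to one of the store file names listed in the
    // HStore(2316) "Moving the files [...]" record for that family.
    for (FileStatus status : fs.listStatus(archivedFamilyDir)) {
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}
```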
2024-12-03T15:21:58,382 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/0ef400c3411642e68a10c519535c2c8d 2024-12-03T15:21:58,383 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/08c79b96b7594a7e856b62cb7f041b75 2024-12-03T15:21:58,384 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/69d5c71ecc94492e9f9b5a128560c1c3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/69d5c71ecc94492e9f9b5a128560c1c3 2024-12-03T15:21:58,385 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01b999892816463d97cbd759305f9689 2024-12-03T15:21:58,386 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8be01a4bf7794f1eaf9475231de51bc4 2024-12-03T15:21:58,387 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7f3f9e5b46324609afbc92b84371aa53 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7f3f9e5b46324609afbc92b84371aa53 2024-12-03T15:21:58,388 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/868e1a442e934b47903742b8fe0b4080 2024-12-03T15:21:58,390 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/59a3323b48f6491193396bcf5e74ad62 2024-12-03T15:21:58,391 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/db721cfe7e8b4bd483092917a12952fe 2024-12-03T15:21:58,397 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/241c2fdb0bed44e48696ca646ec67da5 2024-12-03T15:21:58,402 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/61cf08edb5444cb7bdad2eaeb6e1b864 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/61cf08edb5444cb7bdad2eaeb6e1b864 2024-12-03T15:21:58,406 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7124220096a04f69b775f50f7ecc3b50 2024-12-03T15:21:58,407 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2d01526d6808453fb749b4e010fbef05 2024-12-03T15:21:58,408 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/01e4df141f574d43b8f0823563094f4f 2024-12-03T15:21:58,410 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/1cf441354a2447a39bbd3bc424d207c4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/1cf441354a2447a39bbd3bc424d207c4 2024-12-03T15:21:58,411 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/952a5b45608b444a928792044f9791ca 2024-12-03T15:21:58,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c5474e1d1f914a3097d3d03edd0e810d 2024-12-03T15:21:58,414 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/030cf2065b7f46959f86434d8bc7714b 2024-12-03T15:21:58,415 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/4f2af9d5f10d42bb8c28bf6646bddc2e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/4f2af9d5f10d42bb8c28bf6646bddc2e 2024-12-03T15:21:58,416 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/eeaf8e000806471794a7d78f74036ef3 2024-12-03T15:21:58,417 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/aac8bfadf3cc416fa55ba2ba227fe226 2024-12-03T15:21:58,418 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6e0374c6cf4aa79b879c19dd77a1ae 2024-12-03T15:21:58,419 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c745367f62424858abf7bd0dc5fd70c7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/c745367f62424858abf7bd0dc5fd70c7 2024-12-03T15:21:58,420 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/04e5891c3d9e405c9ca1155d964317fd 2024-12-03T15:21:58,421 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/e186d8538baa4d5b9914eca757120dc8 2024-12-03T15:21:58,422 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/7c32abe52fdd4fc7bf8b777c5599d18a 2024-12-03T15:21:58,423 DEBUG [StoreCloser-TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/94ceb887bc88445d8751a8e1d2c1c83a 2024-12-03T15:21:58,431 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/recovered.edits/448.seqid, newMaxSeqId=448, maxSeqId=1 2024-12-03T15:21:58,432 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658. 2024-12-03T15:21:58,432 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1635): Region close journal for 5a9eb34ef535e1571d4c28ffefa7e658: 2024-12-03T15:21:58,434 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(170): Closed 5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:58,434 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=5a9eb34ef535e1571d4c28ffefa7e658, regionState=CLOSED 2024-12-03T15:21:58,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-03T15:21:58,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseRegionProcedure 5a9eb34ef535e1571d4c28ffefa7e658, server=2b5ef621a0dd,46815,1733239226292 in 905 msec 2024-12-03T15:21:58,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=82 2024-12-03T15:21:58,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=82, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=5a9eb34ef535e1571d4c28ffefa7e658, UNASSIGN in 909 msec 2024-12-03T15:21:58,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-03T15:21:58,439 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 912 msec 2024-12-03T15:21:58,441 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239318440"}]},"ts":"1733239318440"} 2024-12-03T15:21:58,441 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-03T15:21:58,446 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:21:58,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 929 msec 2024-12-03T15:21:58,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-03T15:21:58,627 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-03T15:21:58,628 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:21:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,630 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=85, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-03T15:21:58,630 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=85, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,631 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:58,635 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/recovered.edits] 2024-12-03T15:21:58,639 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/093642b9ad974fff89e188a2e9906591 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/093642b9ad974fff89e188a2e9906591 2024-12-03T15:21:58,640 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/0a9f27b21aec4678ae802f02b470f6e8 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/0a9f27b21aec4678ae802f02b470f6e8 2024-12-03T15:21:58,641 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1470d59d043343b380b50ba015904147 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/1470d59d043343b380b50ba015904147 2024-12-03T15:21:58,642 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/83fa8d8f7e964c93bf1b02712f50e3bc to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/A/83fa8d8f7e964c93bf1b02712f50e3bc 2024-12-03T15:21:58,645 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/45962586f22242479d3667daeac47443 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/45962586f22242479d3667daeac47443 2024-12-03T15:21:58,646 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4aa7e4c1e19b4ef59aebf56c63d78da0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/4aa7e4c1e19b4ef59aebf56c63d78da0 2024-12-03T15:21:58,647 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/87095c608543433d9ffe87378d6fe911 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/87095c608543433d9ffe87378d6fe911 2024-12-03T15:21:58,648 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d0d282115385470a8fc271f5f577bc28 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/B/d0d282115385470a8fc271f5f577bc28 2024-12-03T15:21:58,651 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/058530edafcc4ae9874c90b6e6a94d54 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/058530edafcc4ae9874c90b6e6a94d54 2024-12-03T15:21:58,681 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6b3c4156d04546af4275c4d856014c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/2e6b3c4156d04546af4275c4d856014c 2024-12-03T15:21:58,703 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8e65937644c34ecca2cb826a84c7fff8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/8e65937644c34ecca2cb826a84c7fff8 2024-12-03T15:21:58,717 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/b27416a18f11424ab98c38b8517df857 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/C/b27416a18f11424ab98c38b8517df857 2024-12-03T15:21:58,725 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/recovered.edits/448.seqid to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658/recovered.edits/448.seqid 2024-12-03T15:21:58,726 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/5a9eb34ef535e1571d4c28ffefa7e658 2024-12-03T15:21:58,726 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:21:58,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-03T15:21:58,734 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=85, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,745 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:21:58,748 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:21:58,749 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=85, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,749 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
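The procedure trail above (CloseRegionProcedure, then TransitRegionStateProcedure, then DisableTableProcedure, followed by DeleteTableProcedure archiving the region directories and cleaning hbase:meta) is driven by two client calls that the log attributes to the test client ("Operation: DISABLE ... completed", then "delete TestAcidGuarantees"). A minimal sketch of that client-side sequence against the public Admin API, assuming a configured connection to the test cluster, is shown below; DropTableSketch is an illustrative name, not code from the test.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {

  public static void main(String[] args) throws IOException {
    // Assumes hbase-site.xml on the classpath points at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // The master runs DisableTableProcedure and unassigns the table's regions.
        admin.disableTable(table);
      }
      // The master runs DeleteTableProcedure: region directories are archived,
      // the region rows are removed from hbase:meta, and the descriptor is dropped.
      admin.deleteTable(table);
    }
  }
}
```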
2024-12-03T15:21:58,749 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239318749"}]},"ts":"9223372036854775807"} 2024-12-03T15:21:58,751 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:21:58,751 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 5a9eb34ef535e1571d4c28ffefa7e658, NAME => 'TestAcidGuarantees,,1733239285379.5a9eb34ef535e1571d4c28ffefa7e658.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:21:58,751 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-03T15:21:58,751 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239318751"}]},"ts":"9223372036854775807"} 2024-12-03T15:21:58,753 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:21:58,756 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=85, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,757 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 128 msec 2024-12-03T15:21:58,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-03T15:21:58,932 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-03T15:21:58,945 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=237 (was 242), OpenFileDescriptor=445 (was 464), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=915 (was 825) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1447 (was 2003) 2024-12-03T15:21:58,955 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=915, ProcessCount=11, AvailableMemoryMB=1445 2024-12-03T15:21:58,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
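Procedure pid=85 above is the tail end of a table drop: archive the regions, delete the region rows and table state from hbase:meta, remove the descriptor, and mark the table deleted. From the client side this whole sequence is normally driven by a disable-then-delete pair of Admin calls; a minimal sketch under that assumption (connection configuration omitted, table name taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName name = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(name)) {
                if (admin.isTableEnabled(name)) {
                    admin.disableTable(name);   // regions are closed first
                }
                admin.deleteTable(name);        // master runs a DeleteTableProcedure, as logged above
            }
        }
    }
}
```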
2024-12-03T15:21:58,956 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:21:58,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:21:58,958 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:21:58,958 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:58,958 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 86 2024-12-03T15:21:58,959 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:21:58,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-03T15:21:58,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742169_1345 (size=963) 2024-12-03T15:21:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-03T15:21:59,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-03T15:21:59,386 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:21:59,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742170_1346 (size=53) 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing a2e5b6b6d57ac0725cc77df907fce083, disabling compactions & flushes 2024-12-03T15:21:59,456 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. after waiting 0 ms 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,456 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,456 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:21:59,460 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:21:59,461 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239319460"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239319460"}]},"ts":"1733239319460"} 2024-12-03T15:21:59,466 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
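The create request logged at 15:21:58,956 builds a table with three column families (A, B, C), one version each, ROW bloom filters, 64 KB blocks, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is what later makes each store come up as a CompactingMemStore with an ADAPTIVE compactor. A hedged sketch of how such a descriptor is typically assembled with the HBase 2.x client API (the test harness may build it differently):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
    static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)            // BLOCKSIZE => 64KB
            .build();
    }

    static void create(Admin admin) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata seen in the log; selects the adaptive in-memory compaction policy
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            .setColumnFamily(family("A"))
            .setColumnFamily(family("B"))
            .setColumnFamily(family("C"))
            .build();
        admin.createTable(td);  // master stores a CreateTableProcedure (pid=86 in the log)
    }
}
```

The repeated "Checking to see if procedure is done pid=86" entries that follow appear to be the client blocking on the returned table future until that procedure completes.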
2024-12-03T15:21:59,467 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:21:59,467 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239319467"}]},"ts":"1733239319467"} 2024-12-03T15:21:59,479 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:21:59,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, ASSIGN}] 2024-12-03T15:21:59,506 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, ASSIGN 2024-12-03T15:21:59,511 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:21:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-03T15:21:59,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:59,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; OpenRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:21:59,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:59,819 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:21:59,819 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7285): Opening region: {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:21:59,819 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,819 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:21:59,819 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7327): checking encryption for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,820 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(7330): checking classloading for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,821 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,823 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:59,823 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName A 2024-12-03T15:21:59,823 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:59,824 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:59,824 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,825 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:59,825 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName B 2024-12-03T15:21:59,825 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:59,828 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:59,828 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,829 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:21:59,829 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName C 2024-12-03T15:21:59,830 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:21:59,830 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:21:59,830 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,831 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,831 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,833 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:21:59,834 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1085): writing seq id for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:21:59,837 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:21:59,837 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1102): Opened a2e5b6b6d57ac0725cc77df907fce083; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63376091, jitterRate=-0.0556226521730423}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:21:59,838 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegion(1001): Region open journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:21:59,839 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., pid=88, masterSystemTime=1733239319815 2024-12-03T15:21:59,840 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:21:59,840 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=88}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
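The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to the memstore flush heap size divided by the number of families (16.0 MB here). If a test wanted an explicit per-family lower bound instead, it could be supplied as a table-level value; a small hedged sketch follows, where the 1 MB figure is only an example and not taken from the log:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
    static void setLowerBound(Admin admin) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
            // property name taken from the log line above; the value here is an arbitrary example
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(1024 * 1024))
            .build();
        admin.modifyTable(updated);  // triggers a ModifyTableProcedure and region reopen, as with pid=89 below
    }
}
```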
2024-12-03T15:21:59,841 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:21:59,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-03T15:21:59,843 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; OpenRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 in 179 msec 2024-12-03T15:21:59,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-03T15:21:59,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, ASSIGN in 339 msec 2024-12-03T15:21:59,848 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:21:59,848 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239319848"}]},"ts":"1733239319848"} 2024-12-03T15:21:59,849 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:21:59,852 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=86, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:21:59,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 896 msec 2024-12-03T15:22:00,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-03T15:22:00,076 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 86 completed 2024-12-03T15:22:00,078 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-12-03T15:22:00,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,095 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,096 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:22:00,099 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48386, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:22:00,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-03T15:22:00,101 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:22:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:00,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742171_1347 (size=999) 2024-12-03T15:22:00,120 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-03T15:22:00,120 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-03T15:22:00,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:22:00,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, REOPEN/MOVE}] 2024-12-03T15:22:00,124 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, REOPEN/MOVE 2024-12-03T15:22:00,124 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,125 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:22:00,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:00,277 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,277 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,277 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:22:00,278 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing a2e5b6b6d57ac0725cc77df907fce083, disabling compactions & flushes 2024-12-03T15:22:00,278 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,278 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,278 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. after waiting 0 ms 2024-12-03T15:22:00,278 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
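The modify request logged at 15:22:00,101 changes only column family A: it adds IS_MOB => 'true' and MOB_THRESHOLD => '4', so values above the 4-byte threshold are written out as MOB references, which is what the testMobScanAtomicity run exercises. The log shows this submitted as a full table modification (ModifyTableProcedure pid=89); a hedged sketch of expressing the same family-level change through the Admin API:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
    static void enableMobOnA(Admin admin) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        ColumnFamilyDescriptor a = admin.getDescriptor(name).getColumnFamily(Bytes.toBytes("A"));
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
            .build();
        // The master schedules the table modification and reopens the region,
        // matching the REOPEN/MOVE transition logged for pid=90..93.
        admin.modifyColumnFamily(name, mobA);
    }
}
```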
2024-12-03T15:22:00,282 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-03T15:22:00,283 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,283 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:00,283 WARN [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionServer(3786): Not adding moved region record: a2e5b6b6d57ac0725cc77df907fce083 to self. 2024-12-03T15:22:00,287 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,287 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=CLOSED 2024-12-03T15:22:00,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-03T15:22:00,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 in 163 msec 2024-12-03T15:22:00,291 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, REOPEN/MOVE; state=CLOSED, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=true 2024-12-03T15:22:00,442 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE; OpenRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:00,595 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,597 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:00,598 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7285): Opening region: {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:22:00,598 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,598 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:00,598 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7327): checking encryption for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,598 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(7330): checking classloading for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,600 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,600 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:00,601 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName A 2024-12-03T15:22:00,602 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:00,602 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:00,603 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,603 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:00,604 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName B 2024-12-03T15:22:00,604 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:00,604 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:00,604 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,605 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:00,605 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2e5b6b6d57ac0725cc77df907fce083 columnFamilyName C 2024-12-03T15:22:00,605 DEBUG [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:00,605 INFO [StoreOpener-a2e5b6b6d57ac0725cc77df907fce083-1 {}] regionserver.HStore(327): Store=a2e5b6b6d57ac0725cc77df907fce083/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:00,605 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,606 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,607 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,608 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:22:00,609 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1085): writing seq id for a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,610 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1102): Opened a2e5b6b6d57ac0725cc77df907fce083; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63905404, jitterRate=-0.04773527383804321}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:22:00,611 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegion(1001): Region open journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:00,612 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., pid=93, masterSystemTime=1733239320594 2024-12-03T15:22:00,613 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,613 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=93}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:00,613 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=OPEN, openSeqNum=5, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=91 2024-12-03T15:22:00,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=91, state=SUCCESS; OpenRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 in 171 msec 2024-12-03T15:22:00,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-03T15:22:00,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, REOPEN/MOVE in 492 msec 2024-12-03T15:22:00,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-03T15:22:00,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 495 msec 2024-12-03T15:22:00,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 518 msec 2024-12-03T15:22:00,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-03T15:22:00,622 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-12-03T15:22:00,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,630 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-12-03T15:22:00,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-12-03T15:22:00,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 
127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-12-03T15:22:00,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,648 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f94d721 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5aee939b 2024-12-03T15:22:00,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e247aa1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,652 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-12-03T15:22:00,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,656 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-12-03T15:22:00,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,660 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-12-03T15:22:00,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,664 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-12-03T15:22:00,666 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,667 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x68035c67 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@627cad17 2024-12-03T15:22:00,670 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a637ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:00,673 DEBUG [hconnection-0x2e34555e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,673 DEBUG [hconnection-0x27637193-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,674 DEBUG [hconnection-0x5b043b6b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,675 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33554, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,676 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33568, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,676 DEBUG [hconnection-0x4d7842a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,677 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,678 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,681 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:00,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees 2024-12-03T15:22:00,683 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:00,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:00,683 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:00,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:00,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush 
requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:00,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:00,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:00,697 DEBUG [hconnection-0x7bf595b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,697 DEBUG [hconnection-0x31ae54af-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,698 DEBUG [hconnection-0x204de444-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,700 DEBUG [hconnection-0x5e33a56d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,700 DEBUG [hconnection-0x491a2234-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,701 DEBUG [hconnection-0x33dcf21f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:00,702 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,703 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,703 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,704 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33622, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,704 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239380713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239380715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239380712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239380715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239380703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,718 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33636, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:00,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c43c99eb91a64273b4a5ca3f222a6f43_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239320683/Put/seqid=0 2024-12-03T15:22:00,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742172_1348 (size=12154) 2024-12-03T15:22:00,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:00,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239380817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239380817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239380817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239380819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:00,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239380816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:00,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:00,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:00,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:00,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:00,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:00,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:00,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:00,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:00,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:00,992 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:00,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:00,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239381024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239381025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239381025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239381027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239381037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,146 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:01,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:01,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,173 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c43c99eb91a64273b4a5ca3f222a6f43_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c43c99eb91a64273b4a5ca3f222a6f43_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:01,179 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/09823856e8b24069abab0f448027b05d, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:01,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/09823856e8b24069abab0f448027b05d is 175, key is test_row_0/A:col10/1733239320683/Put/seqid=0 2024-12-03T15:22:01,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742173_1349 (size=30955) 2024-12-03T15:22:01,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:01,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239381330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239381332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239381333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239381336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239381344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,463 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,615 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/09823856e8b24069abab0f448027b05d 2024-12-03T15:22:01,616 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
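Note on the repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries above: HRegion.checkResources() blocks writes once a region's memstore exceeds the configured flush size multiplied by the block multiplier. The exact values used by this TestAcidGuarantees run are not visible in the log, so the following Java snippet is a hedged illustration only; the two property names are standard HBase configuration keys, but the numbers are assumptions chosen to reproduce the 512 K threshold seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        // Hypothetical test-harness settings; assumed for illustration only.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x flush size (assumed)

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        // 128 K * 4 = 512 K, matching "Over memstore limit=512.0 K" in the
        // RegionTooBusyException messages thrown by HRegion.checkResources().
        System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
    }
}

With such a small blocking limit, the writers in this test hit the limit almost immediately while the flush of region a2e5b6b6d57ac0725cc77df907fce083 is still in progress, which is why the same exception repeats across several handler threads.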
2024-12-03T15:22:01,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/7541ce23b4ec44779838a428c4d2c163 is 50, key is test_row_0/B:col10/1733239320683/Put/seqid=0 2024-12-03T15:22:01,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742174_1350 (size=12001) 2024-12-03T15:22:01,769 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
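From the client side, each rejected Mutate above surfaces as a RegionTooBusyException, and the HBase client retries the call internally until the RPC deadline (the "deadline:" values in the CallRunner entries) is reached. The sketch below shows what a minimal writer against the TestAcidGuarantees table might look like; it is a hypothetical illustration using the row/family/qualifier names visible in the log ("test_row_0", family A, col10), and the connection settings and cell value are assumptions, not code from this test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyWriterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            try {
                // The client retries RegionTooBusyException internally; if the
                // region stays blocked past the operation deadline, the put fails
                // with an IOException whose cause chain includes RegionTooBusyException.
                table.put(put);
            } catch (IOException e) {
                // Application-level fallback: report and let the caller back off.
                System.err.println("put rejected, region busy or deadline exceeded: " + e);
            }
        }
    }
}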
2024-12-03T15:22:01,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:01,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239381840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239381842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239381843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239381843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:01,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239381849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,923 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:01,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:01,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:01,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:01,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:02,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:02,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:02,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:02,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/7541ce23b4ec44779838a428c4d2c163 2024-12-03T15:22:02,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/481a44ed66c844aebfb9ebea464a1237 is 50, key is test_row_0/C:col10/1733239320683/Put/seqid=0 2024-12-03T15:22:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742175_1351 (size=12001) 2024-12-03T15:22:02,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/481a44ed66c844aebfb9ebea464a1237 2024-12-03T15:22:02,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/09823856e8b24069abab0f448027b05d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d 2024-12-03T15:22:02,231 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d, entries=150, sequenceid=16, filesize=30.2 K 2024-12-03T15:22:02,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/7541ce23b4ec44779838a428c4d2c163 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163 2024-12-03T15:22:02,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163, entries=150, sequenceid=16, filesize=11.7 K 2024-12-03T15:22:02,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/481a44ed66c844aebfb9ebea464a1237 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237 2024-12-03T15:22:02,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:02,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:02,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:02,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:02,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:02,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237, entries=150, sequenceid=16, filesize=11.7 K 2024-12-03T15:22:02,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for a2e5b6b6d57ac0725cc77df907fce083 in 1568ms, sequenceid=16, compaction requested=false 2024-12-03T15:22:02,257 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-03T15:22:02,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:02,400 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-03T15:22:02,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
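The pid=95 entries above are a master-driven flush procedure being re-dispatched to the region server; each attempt fails with "Unable to complete flush" while the MemStoreFlusher's own flush (sequenceid=16) is still running, and once that flush commits the A/B/C store files and writes its status journal, the retried procedure can finally start its own flush, which is what the next entries show. A table flush of this kind can be requested through the Admin API; whether this particular test used Admin#flush is not shown in the log, so the snippet below is only a hedged sketch of such a request.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master runs
            // a flush procedure (as with pid=95 above) and keeps re-dispatching it
            // to a region that reports it is "already flushing" until it succeeds.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}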
2024-12-03T15:22:02,401 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:22:02,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:02,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:02,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:02,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:02,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:02,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:02,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120355440996f16747fb9ff2bb7936fefbae_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239320711/Put/seqid=0 2024-12-03T15:22:02,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742176_1352 (size=12154) 2024-12-03T15:22:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:02,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:02,862 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120355440996f16747fb9ff2bb7936fefbae_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120355440996f16747fb9ff2bb7936fefbae_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:02,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ae6d6e0a1cc3422f9e5c2cdcb019411d, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:02,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ae6d6e0a1cc3422f9e5c2cdcb019411d is 175, key is test_row_0/A:col10/1733239320711/Put/seqid=0 2024-12-03T15:22:02,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:02,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:02,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239382877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239382880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239382880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239382880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239382883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742177_1353 (size=30955) 2024-12-03T15:22:02,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239382986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239382988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239382988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:02,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:02,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239382989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239382994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239383194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239383196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239383196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239383197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239383206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,303 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ae6d6e0a1cc3422f9e5c2cdcb019411d 2024-12-03T15:22:03,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/49a0822c0b3547fe848353024e38b9c4 is 50, key is test_row_0/B:col10/1733239320711/Put/seqid=0 2024-12-03T15:22:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742178_1354 (size=12001) 2024-12-03T15:22:03,323 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/49a0822c0b3547fe848353024e38b9c4 2024-12-03T15:22:03,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8faa1ccd175c46f7a15ba265aa391734 is 50, key is test_row_0/C:col10/1733239320711/Put/seqid=0 2024-12-03T15:22:03,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742179_1355 (size=12001) 2024-12-03T15:22:03,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239383497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239383502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239383502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239383502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:03,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239383509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:03,738 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8faa1ccd175c46f7a15ba265aa391734 2024-12-03T15:22:03,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ae6d6e0a1cc3422f9e5c2cdcb019411d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d 2024-12-03T15:22:03,747 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d, entries=150, sequenceid=41, filesize=30.2 K 2024-12-03T15:22:03,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/49a0822c0b3547fe848353024e38b9c4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4 2024-12-03T15:22:03,753 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4, entries=150, sequenceid=41, filesize=11.7 K 2024-12-03T15:22:03,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8faa1ccd175c46f7a15ba265aa391734 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734 2024-12-03T15:22:03,760 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734, entries=150, sequenceid=41, filesize=11.7 K 2024-12-03T15:22:03,761 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a2e5b6b6d57ac0725cc77df907fce083 in 1360ms, sequenceid=41, compaction requested=false 2024-12-03T15:22:03,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:03,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:03,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=95 2024-12-03T15:22:03,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=95 2024-12-03T15:22:03,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-03T15:22:03,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0790 sec 2024-12-03T15:22:03,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees in 3.0840 sec 2024-12-03T15:22:03,966 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T15:22:04,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:04,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:04,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:04,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120320b9dd4a54f94b318919408522e328fe_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:04,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742180_1356 (size=14594) 2024-12-03T15:22:04,080 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:04,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239384072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,084 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120320b9dd4a54f94b318919408522e328fe_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120320b9dd4a54f94b318919408522e328fe_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:04,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239384074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,085 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/30353f92d91641f58b6021904dc2f9a0, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:04,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/30353f92d91641f58b6021904dc2f9a0 is 175, key is test_row_0/A:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:04,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239384087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239384091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239384094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742181_1357 (size=39549) 2024-12-03T15:22:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239384181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239384186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239384201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239384201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239384202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239384390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239384394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239384408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239384410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239384411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,527 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/30353f92d91641f58b6021904dc2f9a0 2024-12-03T15:22:04,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/5c3296914f2d4f0bb2e3bb329833086d is 50, key is test_row_0/B:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:04,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742182_1358 (size=12001) 2024-12-03T15:22:04,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/5c3296914f2d4f0bb2e3bb329833086d 2024-12-03T15:22:04,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239384694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/42d7ac96ca6a4ef48588854efb84d1ae is 50, key is test_row_0/C:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:04,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239384708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742183_1359 (size=12001) 2024-12-03T15:22:04,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239384722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239384723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:04,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239384737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-03T15:22:04,814 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-03T15:22:04,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:04,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-03T15:22:04,823 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:04,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:04,823 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:04,824 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:04,975 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:04,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:04,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:04,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:04,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:05,128 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:05,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/42d7ac96ca6a4ef48588854efb84d1ae 2024-12-03T15:22:05,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/30353f92d91641f58b6021904dc2f9a0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0 2024-12-03T15:22:05,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0, entries=200, sequenceid=53, filesize=38.6 K 2024-12-03T15:22:05,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/5c3296914f2d4f0bb2e3bb329833086d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d 2024-12-03T15:22:05,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d, entries=150, sequenceid=53, filesize=11.7 K 2024-12-03T15:22:05,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/42d7ac96ca6a4ef48588854efb84d1ae as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae 2024-12-03T15:22:05,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae, entries=150, sequenceid=53, filesize=11.7 K 2024-12-03T15:22:05,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for a2e5b6b6d57ac0725cc77df907fce083 in 1172ms, sequenceid=53, compaction requested=true 2024-12-03T15:22:05,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:05,177 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:05,177 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:05,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:05,178 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:05,178 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:05,178 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,178 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.2 K 2024-12-03T15:22:05,178 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:05,178 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:05,178 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:05,178 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=99.1 K 2024-12-03T15:22:05,179 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,179 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0] 2024-12-03T15:22:05,180 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7541ce23b4ec44779838a428c4d2c163, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239320683 2024-12-03T15:22:05,180 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09823856e8b24069abab0f448027b05d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239320683 2024-12-03T15:22:05,181 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 49a0822c0b3547fe848353024e38b9c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239320711 2024-12-03T15:22:05,181 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae6d6e0a1cc3422f9e5c2cdcb019411d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239320711 2024-12-03T15:22:05,181 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c3296914f2d4f0bb2e3bb329833086d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:05,181 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30353f92d91641f58b6021904dc2f9a0, keycount=200, bloomtype=ROW, size=38.6 K, 
encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:05,199 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,214 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#304 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:05,215 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/fd21a49038df476cbfd04a4eb2727e80 is 50, key is test_row_0/B:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:05,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:05,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,222 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203f9e78b91e4bd4b8599fe1e1054176528_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,224 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203f9e78b91e4bd4b8599fe1e1054176528_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,224 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f9e78b91e4bd4b8599fe1e1054176528_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32963 is added to blk_1073742185_1361 (size=4469) 2024-12-03T15:22:05,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120377483a96342245f4b7ae554f4ea164d2_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239324055/Put/seqid=0 2024-12-03T15:22:05,238 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#303 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:05,239 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2c3e3bf98f8e411b8d256f910df1c759 is 175, key is test_row_0/A:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:05,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742184_1360 (size=12104) 2024-12-03T15:22:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742186_1362 (size=14594) 2024-12-03T15:22:05,247 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,248 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/fd21a49038df476cbfd04a4eb2727e80 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/fd21a49038df476cbfd04a4eb2727e80 2024-12-03T15:22:05,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239385242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742187_1363 (size=31058) 2024-12-03T15:22:05,256 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120377483a96342245f4b7ae554f4ea164d2_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120377483a96342245f4b7ae554f4ea164d2_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:05,258 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5d84206308454979a3e96ea4e5cef65d, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5d84206308454979a3e96ea4e5cef65d is 175, key is test_row_0/A:col10/1733239324055/Put/seqid=0 2024-12-03T15:22:05,259 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into fd21a49038df476cbfd04a4eb2727e80(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:05,259 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:05,259 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239325177; duration=0sec 2024-12-03T15:22:05,259 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:05,259 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:05,259 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:05,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239385248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239385248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:05,262 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:05,262 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,262 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.2 K 2024-12-03T15:22:05,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239385250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239385252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,263 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 481a44ed66c844aebfb9ebea464a1237, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733239320683 2024-12-03T15:22:05,263 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8faa1ccd175c46f7a15ba265aa391734, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239320711 2024-12-03T15:22:05,263 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 42d7ac96ca6a4ef48588854efb84d1ae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:05,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742188_1364 (size=39549) 2024-12-03T15:22:05,266 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5d84206308454979a3e96ea4e5cef65d 2024-12-03T15:22:05,272 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:05,273 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/65f6557b05324fcb92a03f4ab9e8feef is 50, key is test_row_0/C:col10/1733239322864/Put/seqid=0 2024-12-03T15:22:05,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/890a48df60ae4a9393d5581d3c2ac2de is 50, key is test_row_0/B:col10/1733239324055/Put/seqid=0 2024-12-03T15:22:05,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:05,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742190_1366 (size=12001) 2024-12-03T15:22:05,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/890a48df60ae4a9393d5581d3c2ac2de 2024-12-03T15:22:05,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742189_1365 (size=12104) 2024-12-03T15:22:05,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/ae3136e25f5141ffbdcb37847163f47e is 50, key is test_row_0/C:col10/1733239324055/Put/seqid=0 2024-12-03T15:22:05,315 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/65f6557b05324fcb92a03f4ab9e8feef as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/65f6557b05324fcb92a03f4ab9e8feef 2024-12-03T15:22:05,323 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 65f6557b05324fcb92a03f4ab9e8feef(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:05,323 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:05,323 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239325177; duration=0sec 2024-12-03T15:22:05,323 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:05,323 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:05,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742191_1367 (size=12001) 2024-12-03T15:22:05,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/ae3136e25f5141ffbdcb37847163f47e 2024-12-03T15:22:05,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5d84206308454979a3e96ea4e5cef65d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d 2024-12-03T15:22:05,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d, entries=200, sequenceid=78, filesize=38.6 K 2024-12-03T15:22:05,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/890a48df60ae4a9393d5581d3c2ac2de as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de 2024-12-03T15:22:05,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,345 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de, entries=150, sequenceid=78, filesize=11.7 K
2024-12-03T15:22:05,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/ae3136e25f5141ffbdcb37847163f47e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e
2024-12-03T15:22:05,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e, entries=150, sequenceid=78, filesize=11.7 K
2024-12-03T15:22:05,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for a2e5b6b6d57ac0725cc77df907fce083 in 134ms, sequenceid=78, compaction requested=false
2024-12-03T15:22:05,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083:
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,383 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:05,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:22:05,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:05,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:05,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:05,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,391 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,395 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,403 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203544718f537c1423a8e9f09333d253e47_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742192_1368 (size=14594) 2024-12-03T15:22:05,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:05,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:05,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239385496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239385498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239385499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,593 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239385601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239385606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239385606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,659 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2c3e3bf98f8e411b8d256f910df1c759 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759 2024-12-03T15:22:05,665 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 2c3e3bf98f8e411b8d256f910df1c759(size=30.3 K), total size for store is 69.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:05,665 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:05,665 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239325177; duration=0sec 2024-12-03T15:22:05,665 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:05,665 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:05,746 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:05,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:05,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239385806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239385812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239385813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,817 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:05,829 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203544718f537c1423a8e9f09333d253e47_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203544718f537c1423a8e9f09333d253e47_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:05,830 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/50d28f3cc7d04838abdff5d51e5609e5, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:05,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/50d28f3cc7d04838abdff5d51e5609e5 is 175, key is test_row_0/A:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742193_1369 (size=39549) 2024-12-03T15:22:05,863 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/50d28f3cc7d04838abdff5d51e5609e5 2024-12-03T15:22:05,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/302c5a329c0041479b825c2f3df2bb51 is 50, key is test_row_0/B:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:05,899 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:05,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:05,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:05,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:05,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:05,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:05,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742194_1370 (size=12001) 2024-12-03T15:22:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:06,054 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239386118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239386127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239386130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:06,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,273 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239386265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239386271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/302c5a329c0041479b825c2f3df2bb51 2024-12-03T15:22:06,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/9bfd486575464cea8ebe879551e68a68 is 50, key is test_row_0/C:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:06,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742195_1371 (size=12001) 2024-12-03T15:22:06,382 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:06,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,537 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:06,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239386621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239386631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239386646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:06,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] handler.RSProcedureHandler(58): pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=97 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=97 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:06,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/9bfd486575464cea8ebe879551e68a68 2024-12-03T15:22:06,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/50d28f3cc7d04838abdff5d51e5609e5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5 2024-12-03T15:22:06,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5, entries=200, sequenceid=93, filesize=38.6 K 2024-12-03T15:22:06,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/302c5a329c0041479b825c2f3df2bb51 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51 2024-12-03T15:22:06,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:22:06,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/9bfd486575464cea8ebe879551e68a68 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68 2024-12-03T15:22:06,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:22:06,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for a2e5b6b6d57ac0725cc77df907fce083 in 1387ms, sequenceid=93, compaction requested=true 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:06,774 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:06,774 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:06,775 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:06,775 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:06,775 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:06,775 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:06,775 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,775 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:06,775 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=107.6 K 2024-12-03T15:22:06,775 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/fd21a49038df476cbfd04a4eb2727e80, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.3 K 2024-12-03T15:22:06,775 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:06,775 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5] 2024-12-03T15:22:06,776 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting fd21a49038df476cbfd04a4eb2727e80, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:06,776 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c3e3bf98f8e411b8d256f910df1c759, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:06,776 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 890a48df60ae4a9393d5581d3c2ac2de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239324055 2024-12-03T15:22:06,776 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d84206308454979a3e96ea4e5cef65d, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239324055 2024-12-03T15:22:06,777 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 302c5a329c0041479b825c2f3df2bb51, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325248 2024-12-03T15:22:06,777 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50d28f3cc7d04838abdff5d51e5609e5, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325238 2024-12-03T15:22:06,808 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:06,809 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/b62f7c06d3dc4c1083a3e0d97d123f09 is 50, key is test_row_0/B:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:06,814 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:06,817 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203fc3214882b824089af16014f2c2d81d9_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:06,819 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203fc3214882b824089af16014f2c2d81d9_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:06,819 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fc3214882b824089af16014f2c2d81d9_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:06,843 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742196_1372 (size=12207) 2024-12-03T15:22:06,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97 2024-12-03T15:22:06,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:06,845 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:06,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:06,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742197_1373 (size=4469) 2024-12-03T15:22:06,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033e958b3cc2924cad9a399e69e58c863a_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239325497/Put/seqid=0 2024-12-03T15:22:06,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:06,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742198_1374 (size=12154) 2024-12-03T15:22:06,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:06,940 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033e958b3cc2924cad9a399e69e58c863a_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033e958b3cc2924cad9a399e69e58c863a_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:06,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/987a37d0df514d2bb2aaffbcddb279f7, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:06,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/987a37d0df514d2bb2aaffbcddb279f7 is 175, key is test_row_0/A:col10/1733239325497/Put/seqid=0 2024-12-03T15:22:06,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742199_1375 (size=30955) 2024-12-03T15:22:07,260 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/b62f7c06d3dc4c1083a3e0d97d123f09 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b62f7c06d3dc4c1083a3e0d97d123f09 2024-12-03T15:22:07,275 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#313 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:07,276 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ff6fdb683b474665bccb5b267c1b0981 is 175, key is test_row_0/A:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:07,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742200_1376 (size=31161) 2024-12-03T15:22:07,289 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into b62f7c06d3dc4c1083a3e0d97d123f09(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:07,289 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:07,289 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239326774; duration=0sec 2024-12-03T15:22:07,289 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:07,289 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:07,289 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:07,291 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:07,291 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:07,291 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:07,291 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/65f6557b05324fcb92a03f4ab9e8feef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.3 K 2024-12-03T15:22:07,291 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 65f6557b05324fcb92a03f4ab9e8feef, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733239322864 2024-12-03T15:22:07,292 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ae3136e25f5141ffbdcb37847163f47e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239324055 2024-12-03T15:22:07,292 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bfd486575464cea8ebe879551e68a68, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325248 2024-12-03T15:22:07,301 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a2e5b6b6d57ac0725cc77df907fce083#C#compaction#315 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:07,302 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/45e79b169f764f01b8fc25124e244af5 is 50, key is test_row_0/C:col10/1733239325386/Put/seqid=0 2024-12-03T15:22:07,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742201_1377 (size=12207) 2024-12-03T15:22:07,379 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/987a37d0df514d2bb2aaffbcddb279f7 2024-12-03T15:22:07,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/264d7f436da84526930e80a0e15d5c81 is 50, key is test_row_0/B:col10/1733239325497/Put/seqid=0 2024-12-03T15:22:07,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742202_1378 (size=12001) 2024-12-03T15:22:07,416 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/264d7f436da84526930e80a0e15d5c81 2024-12-03T15:22:07,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8fd2f92dfa6744bc8882f181d324cd98 is 50, key is test_row_0/C:col10/1733239325497/Put/seqid=0 2024-12-03T15:22:07,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742203_1379 (size=12001) 2024-12-03T15:22:07,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
as already flushing 2024-12-03T15:22:07,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:07,695 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ff6fdb683b474665bccb5b267c1b0981 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981 2024-12-03T15:22:07,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239387690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239387690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239387691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,700 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into ff6fdb683b474665bccb5b267c1b0981(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:07,700 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:07,700 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239326774; duration=0sec 2024-12-03T15:22:07,700 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:07,700 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:07,715 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/45e79b169f764f01b8fc25124e244af5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45e79b169f764f01b8fc25124e244af5 2024-12-03T15:22:07,720 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 45e79b169f764f01b8fc25124e244af5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:07,720 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:07,720 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239326774; duration=0sec 2024-12-03T15:22:07,720 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:07,720 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:07,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239387800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239387800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:07,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239387800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:07,845 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8fd2f92dfa6744bc8882f181d324cd98 2024-12-03T15:22:07,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/987a37d0df514d2bb2aaffbcddb279f7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7 2024-12-03T15:22:07,864 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7, entries=150, sequenceid=117, filesize=30.2 K 2024-12-03T15:22:07,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/264d7f436da84526930e80a0e15d5c81 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81 2024-12-03T15:22:07,872 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81, entries=150, sequenceid=117, filesize=11.7 K 2024-12-03T15:22:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8fd2f92dfa6744bc8882f181d324cd98 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98 2024-12-03T15:22:07,883 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98, entries=150, sequenceid=117, filesize=11.7 K 2024-12-03T15:22:07,885 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for a2e5b6b6d57ac0725cc77df907fce083 in 1040ms, sequenceid=117, compaction requested=false 2024-12-03T15:22:07,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:07,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
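The entries around this point show writers being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") from HRegion.checkResources while the region is flushing, and the client-side frames that appear further down (HTable.put driven by RpcRetryingCallerImpl inside AcidGuaranteesTestTool$AtomicityWriter) show those puts being retried. The following is a minimal client-side sketch of that pattern, assuming a caller that wants an explicit, bounded backoff around Table.put on top of the client's own retries; the class and helper names, backoff constants, and the cell value are hypothetical, while the table, row, family, and qualifier names are taken from the log. Depending on retry configuration the busy condition may surface directly as RegionTooBusyException or wrapped in a retries-exhausted exception, so the sketch walks the cause chain rather than catching the busy exception alone.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {

      // Hypothetical helper: retry a single put a bounded number of times when the
      // region reports that its memstore is over the blocking limit.
      static void putWithBackoff(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put); // the same client call that appears in the stack traces (HTable.put)
            return;
          } catch (IOException e) {
            if (attempt >= maxAttempts || !causedByBusyRegion(e)) {
              throw e;
            }
            Thread.sleep(100L * attempt); // simple linear backoff; the constants are arbitrary
          }
        }
      }

      // The busy signal may arrive directly or wrapped by the client's own retry logic,
      // so walk the cause chain looking for RegionTooBusyException.
      static boolean causedByBusyRegion(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
          if (cur instanceof RegionTooBusyException) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row, family and qualifier mirror the log entries (test_row_0, family A, col10).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put, 5);
        }
      }
    }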
2024-12-03T15:22:07,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-03T15:22:07,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-03T15:22:07,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-03T15:22:07,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0690 sec 2024-12-03T15:22:07,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 3.0790 sec 2024-12-03T15:22:08,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:08,011 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:22:08,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:08,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:08,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:08,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c192c51a117f439f95e1572b8416a8b1_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742204_1380 (size=17284) 2024-12-03T15:22:08,063 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:08,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239388057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239388064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239388067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,075 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c192c51a117f439f95e1572b8416a8b1_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c192c51a117f439f95e1572b8416a8b1_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:08,086 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3744e4cc122e4657a1b070f8ef1fb9fa, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3744e4cc122e4657a1b070f8ef1fb9fa is 175, key is test_row_0/A:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742205_1381 (size=48389) 2024-12-03T15:22:08,136 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=31.3 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3744e4cc122e4657a1b070f8ef1fb9fa 2024-12-03T15:22:08,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/34d7b225a1e54504afd1a686640a2433 is 50, key is test_row_0/B:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239388167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239388175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239388179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742206_1382 (size=12151) 2024-12-03T15:22:08,202 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/34d7b225a1e54504afd1a686640a2433 2024-12-03T15:22:08,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d2bd850eb8940ec82beef04798408ea is 50, key is test_row_0/C:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742207_1383 (size=12151) 2024-12-03T15:22:08,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d2bd850eb8940ec82beef04798408ea 2024-12-03T15:22:08,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3744e4cc122e4657a1b070f8ef1fb9fa as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa 2024-12-03T15:22:08,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa, entries=250, sequenceid=137, filesize=47.3 K 2024-12-03T15:22:08,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/34d7b225a1e54504afd1a686640a2433 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433 2024-12-03T15:22:08,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239388290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,297 DEBUG [Thread-1570 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4211 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:08,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239388296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,309 DEBUG [Thread-1576 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4222 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:08,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433, entries=150, sequenceid=137, filesize=11.9 K 2024-12-03T15:22:08,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d2bd850eb8940ec82beef04798408ea as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea 2024-12-03T15:22:08,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea, entries=150, sequenceid=137, filesize=11.9 K 2024-12-03T15:22:08,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a2e5b6b6d57ac0725cc77df907fce083 in 352ms, sequenceid=137, compaction requested=true 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:08,363 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:08,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:08,363 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:22:08,363 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:08,374 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:08,374 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:08,375 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:08,375 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b62f7c06d3dc4c1083a3e0d97d123f09, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.5 K 2024-12-03T15:22:08,376 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b62f7c06d3dc4c1083a3e0d97d123f09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325248 2024-12-03T15:22:08,376 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 264d7f436da84526930e80a0e15d5c81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733239325495 2024-12-03T15:22:08,376 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 34d7b225a1e54504afd1a686640a2433, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:08,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-03T15:22:08,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:08,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:08,385 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:08,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:08,387 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110505 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:08,387 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:08,387 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:08,387 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=107.9 K 2024-12-03T15:22:08,387 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:08,388 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa] 2024-12-03T15:22:08,388 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff6fdb683b474665bccb5b267c1b0981, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325248 2024-12-03T15:22:08,388 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 987a37d0df514d2bb2aaffbcddb279f7, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733239325495 2024-12-03T15:22:08,389 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3744e4cc122e4657a1b070f8ef1fb9fa, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:08,405 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#321 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:08,406 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,406 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/03d98b94a7544c808fb7b397d95db10f is 50, key is test_row_0/B:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,422 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412030aa4654697c3484eb4d6ce26856129b1_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412030aa4654697c3484eb4d6ce26856129b1_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030aa4654697c3484eb4d6ce26856129b1_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,445 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fb06022673fb495b9ab3fb3864715522_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239328056/Put/seqid=0 2024-12-03T15:22:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239388445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239388446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239388447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742208_1384 (size=12459) 2024-12-03T15:22:08,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742209_1385 (size=4469) 2024-12-03T15:22:08,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742210_1386 (size=12304) 2024-12-03T15:22:08,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/03d98b94a7544c808fb7b397d95db10f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/03d98b94a7544c808fb7b397d95db10f 2024-12-03T15:22:08,515 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into 03d98b94a7544c808fb7b397d95db10f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
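The "Over memstore limit=512.0 K" rejections above come from the region's blocking memstore size, which HBase derives from the configured flush size multiplied by the block multiplier. Below is a minimal sketch of that arithmetic under assumed, hypothetical values (a 128 KB flush size with the default multiplier of 4 is one combination that yields exactly the 512 K limit reported here); it is not read from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values, not taken from this test: a 128 KB flush size with the
    // default block multiplier of 4 gives a 512 K blocking limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to the region are rejected with RegionTooBusyException once its
    // memstore exceeds this blocking size, until flushes drain it back down.
    System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288 = 512 K
  }
}

Deriving the blocking limit from the flush size keeps the back-pressure threshold proportional to how much data a single flush can clear, which is why the limit in these warnings scales with the flush size rather than being an independent setting.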
2024-12-03T15:22:08,515 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:08,515 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239328363; duration=0sec 2024-12-03T15:22:08,515 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:08,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:08,516 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:08,518 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:08,518 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:08,518 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:08,519 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45e79b169f764f01b8fc25124e244af5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.5 K 2024-12-03T15:22:08,519 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 45e79b169f764f01b8fc25124e244af5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239325248 2024-12-03T15:22:08,519 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fd2f92dfa6744bc8882f181d324cd98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733239325495 2024-12-03T15:22:08,521 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d2bd850eb8940ec82beef04798408ea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:08,530 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a2e5b6b6d57ac0725cc77df907fce083#C#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:08,532 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8065bb78dc7e4dad8250331e043f6e4d is 50, key is test_row_0/C:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239388554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742211_1387 (size=12459) 2024-12-03T15:22:08,576 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8065bb78dc7e4dad8250331e043f6e4d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8065bb78dc7e4dad8250331e043f6e4d 2024-12-03T15:22:08,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239388562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239388562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,593 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 8065bb78dc7e4dad8250331e043f6e4d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:08,593 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:08,593 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239328363; duration=0sec 2024-12-03T15:22:08,593 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:08,593 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:08,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239388763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239388782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239388786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:08,887 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:08,888 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#322 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:08,889 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2c46f435856346f0bb2af6ef94b4f50d is 175, key is test_row_0/A:col10/1733239327686/Put/seqid=0 2024-12-03T15:22:08,892 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fb06022673fb495b9ab3fb3864715522_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fb06022673fb495b9ab3fb3864715522_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:08,893 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6da5ece3e94741899051ebea5c361486, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:08,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6da5ece3e94741899051ebea5c361486 is 175, key is test_row_0/A:col10/1733239328056/Put/seqid=0 2024-12-03T15:22:08,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742212_1388 (size=31413) 2024-12-03T15:22:08,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742213_1389 (size=31105) 2024-12-03T15:22:08,940 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6da5ece3e94741899051ebea5c361486 2024-12-03T15:22:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-03T15:22:08,942 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-03T15:22:08,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3de8578b3bb740b88482aefebcfccdef is 50, key is test_row_0/B:col10/1733239328056/Put/seqid=0 2024-12-03T15:22:08,970 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:08,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-03T15:22:08,973 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:08,973 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:08,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:08,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:08,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742214_1390 (size=12151) 2024-12-03T15:22:08,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3de8578b3bb740b88482aefebcfccdef 2024-12-03T15:22:09,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/808f9edb39cf4b7a947d0673338b55c2 is 50, key is test_row_0/C:col10/1733239328056/Put/seqid=0 2024-12-03T15:22:09,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742215_1391 (size=12151) 2024-12-03T15:22:09,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239389068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:09,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239389087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239389090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,125 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:09,278 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:09,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
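The pid=98/pid=99 entries above trace a master-driven table flush: the master stores a FlushTableProcedure, dispatches a FlushRegionCallable to the region server, and the region server reports back "Unable to complete flush" because the region is already flushing, so the sub-procedure is re-dispatched while the master answers the requesting client's "Checking to see if procedure is done" polls. Below is a minimal sketch of how such a flush is typically requested through the public Admin API; it is an illustration, not code from this test, with the table name mirroring the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure completes; the repeated
      // "Checking to see if procedure is done pid=..." lines are the master's
      // side of that client wait.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}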
2024-12-03T15:22:09,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,312 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2c46f435856346f0bb2af6ef94b4f50d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d 2024-12-03T15:22:09,317 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 2c46f435856346f0bb2af6ef94b4f50d(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:09,317 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:09,317 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239328363; duration=0sec 2024-12-03T15:22:09,317 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:09,317 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:09,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:09,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
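Each RegionTooBusyException above travels back to the client as a retriable error; the repeated callIds with advancing deadlines on the same connections are the client's normal retry loop re-submitting the Mutate. Below is a minimal client-side sketch under assumed, illustrative settings: the retry values are not this test's configuration, the row, family and qualifier only mirror keys such as test_row_0/A:col10 in the log, and the value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative retry/backoff knobs; larger pauses trade latency for less
    // pressure on a region that is already over its memstore limit.
    conf.setInt("hbase.client.retries.number", 35);
    conf.setLong("hbase.client.pause", 100); // milliseconds between retry attempts
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried transparently on RegionTooBusyException until the retry budget
      // is exhausted, matching the repeated Mutate calls in the log.
      table.put(put);
    }
  }
}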
2024-12-03T15:22:09,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/808f9edb39cf4b7a947d0673338b55c2 2024-12-03T15:22:09,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6da5ece3e94741899051ebea5c361486 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486 2024-12-03T15:22:09,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486, entries=150, sequenceid=157, filesize=30.4 K 2024-12-03T15:22:09,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3de8578b3bb740b88482aefebcfccdef as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef 2024-12-03T15:22:09,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef, entries=150, 
sequenceid=157, filesize=11.9 K 2024-12-03T15:22:09,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/808f9edb39cf4b7a947d0673338b55c2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2 2024-12-03T15:22:09,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2, entries=150, sequenceid=157, filesize=11.9 K 2024-12-03T15:22:09,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a2e5b6b6d57ac0725cc77df907fce083 in 1137ms, sequenceid=157, compaction requested=false 2024-12-03T15:22:09,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:09,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:09,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:09,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:09,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,590 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
as already flushing 2024-12-03T15:22:09,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,591 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034200fe34ce00494799b8252d7a60265b_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:09,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742216_1392 (size=14794) 2024-12-03T15:22:09,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239389650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239389654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239389654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239389756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239389760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239389761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,898 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:09,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:09,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:09,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:09,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:09,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239389962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239389966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:09,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:09,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239389966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,047 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:10,051 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,059 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412034200fe34ce00494799b8252d7a60265b_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034200fe34ce00494799b8252d7a60265b_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:10,060 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2a90ee42ecc749d09cb28f96bc6698b5, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2a90ee42ecc749d09cb28f96bc6698b5 is 175, key is test_row_0/A:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:10,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742217_1393 (size=39749) 2024-12-03T15:22:10,108 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2a90ee42ecc749d09cb28f96bc6698b5 2024-12-03T15:22:10,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/f2f876cbc2ee4fa99c6ede28e2988eed is 50, key is test_row_0/B:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:10,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742218_1394 (size=12151) 2024-12-03T15:22:10,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/f2f876cbc2ee4fa99c6ede28e2988eed 2024-12-03T15:22:10,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/43c943f56ce0496fb50d4c08275fc1ac is 50, key is test_row_0/C:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:10,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742219_1395 (size=12151) 2024-12-03T15:22:10,206 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239390271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239390271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239390271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:10,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing
2024-12-03T15:22:10,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.
2024-12-03T15:22:10,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99
java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:10,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99
java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:10,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,528 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
as already flushing 2024-12-03T15:22:10,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/43c943f56ce0496fb50d4c08275fc1ac 2024-12-03T15:22:10,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2a90ee42ecc749d09cb28f96bc6698b5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5 2024-12-03T15:22:10,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5, entries=200, sequenceid=177, filesize=38.8 K 2024-12-03T15:22:10,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/f2f876cbc2ee4fa99c6ede28e2988eed as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed 2024-12-03T15:22:10,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed, entries=150, sequenceid=177, filesize=11.9 K 2024-12-03T15:22:10,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/43c943f56ce0496fb50d4c08275fc1ac as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac 2024-12-03T15:22:10,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:10,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac, entries=150, sequenceid=177, filesize=11.9 K
2024-12-03T15:22:10,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a2e5b6b6d57ac0725cc77df907fce083 in 1112ms, sequenceid=177, compaction requested=true
2024-12-03T15:22:10,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083:
2024-12-03T15:22:10,695 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T15:22:10,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1
2024-12-03T15:22:10,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:22:10,695 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-03T15:22:10,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2
2024-12-03T15:22:10,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:22:10,696 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T15:22:10,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3
2024-12-03T15:22:10,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-03T15:22:10,696 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files)
2024-12-03T15:22:10,696 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.
2024-12-03T15:22:10,696 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=99.9 K
2024-12-03T15:22:10,696 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.
2024-12-03T15:22:10,696 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5]
2024-12-03T15:22:10,698 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-03T15:22:10,698 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files)
2024-12-03T15:22:10,698 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.
2024-12-03T15:22:10,698 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/03d98b94a7544c808fb7b397d95db10f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.9 K 2024-12-03T15:22:10,698 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c46f435856346f0bb2af6ef94b4f50d, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:10,699 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6da5ece3e94741899051ebea5c361486, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239328056 2024-12-03T15:22:10,699 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 03d98b94a7544c808fb7b397d95db10f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:10,699 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a90ee42ecc749d09cb28f96bc6698b5, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:10,700 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3de8578b3bb740b88482aefebcfccdef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239328056 2024-12-03T15:22:10,701 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f2f876cbc2ee4fa99c6ede28e2988eed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:10,724 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:10,724 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/f20548bcf15d45cd893a7c3565999e7e is 50, key is test_row_0/B:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:10,736 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742220_1396 (size=12561) 2024-12-03T15:22:10,778 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412039764ae7cb99b4895b37a1063db7a703b_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,779 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412039764ae7cb99b4895b37a1063db7a703b_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,780 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412039764ae7cb99b4895b37a1063db7a703b_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-03T15:22:10,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:10,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:10,792 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/f20548bcf15d45cd893a7c3565999e7e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f20548bcf15d45cd893a7c3565999e7e 2024-12-03T15:22:10,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033333e333795d49e9b6e3fbf2070c859f_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239329620/Put/seqid=0 2024-12-03T15:22:10,815 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into f20548bcf15d45cd893a7c3565999e7e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:10,816 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:10,816 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239330695; duration=0sec 2024-12-03T15:22:10,816 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:10,816 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:10,816 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:10,817 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:10,817 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:10,817 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:10,817 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8065bb78dc7e4dad8250331e043f6e4d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=35.9 K 2024-12-03T15:22:10,819 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8065bb78dc7e4dad8250331e043f6e4d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733239327686 2024-12-03T15:22:10,820 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 808f9edb39cf4b7a947d0673338b55c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733239328056 2024-12-03T15:22:10,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742221_1397 (size=4469) 2024-12-03T15:22:10,822 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 43c943f56ce0496fb50d4c08275fc1ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:10,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:10,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:10,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742222_1398 (size=17284) 2024-12-03T15:22:10,844 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#333 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:10,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239390837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,844 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/178430428070433286ee1ecc428784f5 is 50, key is test_row_0/C:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:10,845 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:10,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239390840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239390842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,862 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033333e333795d49e9b6e3fbf2070c859f_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033333e333795d49e9b6e3fbf2070c859f_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:10,863 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/4f7f38663cab4a3b85165438a13cd39c, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:10,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/4f7f38663cab4a3b85165438a13cd39c is 175, key is test_row_0/A:col10/1733239329620/Put/seqid=0 2024-12-03T15:22:10,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742223_1399 (size=12561) 2024-12-03T15:22:10,885 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/178430428070433286ee1ecc428784f5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/178430428070433286ee1ecc428784f5 2024-12-03T15:22:10,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742224_1400 (size=48389) 2024-12-03T15:22:10,901 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 178430428070433286ee1ecc428784f5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:10,901 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:10,902 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239330696; duration=0sec 2024-12-03T15:22:10,903 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:10,903 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:10,903 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/4f7f38663cab4a3b85165438a13cd39c 2024-12-03T15:22:10,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/887b7c067b31493db48cb98a91fcc080 is 50, key is test_row_0/B:col10/1733239329620/Put/seqid=0 2024-12-03T15:22:10,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239390951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239390952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:10,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239390952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:10,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742225_1401 (size=12151) 2024-12-03T15:22:10,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/887b7c067b31493db48cb98a91fcc080 2024-12-03T15:22:10,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/54c1b794af1341359d1e2efb9a322804 is 50, key is test_row_0/C:col10/1733239329620/Put/seqid=0 2024-12-03T15:22:11,008 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:11,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:11,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:11,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742226_1402 (size=12151) 2024-12-03T15:22:11,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/54c1b794af1341359d1e2efb9a322804 2024-12-03T15:22:11,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/4f7f38663cab4a3b85165438a13cd39c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c 2024-12-03T15:22:11,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c, entries=250, sequenceid=198, filesize=47.3 K 2024-12-03T15:22:11,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/887b7c067b31493db48cb98a91fcc080 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080 2024-12-03T15:22:11,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080, entries=150, sequenceid=198, filesize=11.9 K 2024-12-03T15:22:11,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/54c1b794af1341359d1e2efb9a322804 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804 2024-12-03T15:22:11,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804, entries=150, sequenceid=198, filesize=11.9 K 2024-12-03T15:22:11,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a2e5b6b6d57ac0725cc77df907fce083 in 293ms, sequenceid=198, compaction requested=false 2024-12-03T15:22:11,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:11,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:11,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:22:11,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:11,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:11,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:11,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:11,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:11,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:11,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033dd00f1e8309433096bafc6f02bffdd6_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742227_1403 (size=19774) 2024-12-03T15:22:11,216 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:11,224 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239391214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,226 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033dd00f1e8309433096bafc6f02bffdd6_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033dd00f1e8309433096bafc6f02bffdd6_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:11,227 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6ddeff555fa84571a5c948437b4eceb4, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,228 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#331 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:11,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6ddeff555fa84571a5c948437b4eceb4 is 175, key is test_row_0/A:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,228 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1 is 175, key is test_row_0/A:col10/1733239328409/Put/seqid=0 2024-12-03T15:22:11,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239391224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239391225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742229_1405 (size=57033) 2024-12-03T15:22:11,262 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=218, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6ddeff555fa84571a5c948437b4eceb4 2024-12-03T15:22:11,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742228_1404 (size=31515) 2024-12-03T15:22:11,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/788ebe9a0b2d43eca0c882a0d2ea2473 is 50, key is test_row_0/B:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,296 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1 2024-12-03T15:22:11,307 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 3b9aeb6c3b4b48d8a75cfb3b9e86c6a1(size=30.8 K), total size for store is 78.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:11,307 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,307 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239330694; duration=0sec 2024-12-03T15:22:11,308 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:11,308 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:11,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742230_1406 (size=12151) 2024-12-03T15:22:11,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/788ebe9a0b2d43eca0c882a0d2ea2473 2024-12-03T15:22:11,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:11,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:11,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:11,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/934b7e89bd184c99beb8262c1aa1033d is 50, key is test_row_0/C:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239391330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239391334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239391335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742231_1407 (size=12151) 2024-12-03T15:22:11,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/934b7e89bd184c99beb8262c1aa1033d 2024-12-03T15:22:11,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/6ddeff555fa84571a5c948437b4eceb4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4 2024-12-03T15:22:11,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4, entries=300, sequenceid=218, filesize=55.7 K 2024-12-03T15:22:11,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/788ebe9a0b2d43eca0c882a0d2ea2473 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473 2024-12-03T15:22:11,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473, entries=150, sequenceid=218, filesize=11.9 K 2024-12-03T15:22:11,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/934b7e89bd184c99beb8262c1aa1033d as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d 2024-12-03T15:22:11,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d, entries=150, sequenceid=218, filesize=11.9 K 2024-12-03T15:22:11,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for a2e5b6b6d57ac0725cc77df907fce083 in 234ms, sequenceid=218, compaction requested=true 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:11,398 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:11,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:22:11,399 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136937 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:11,399 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:11,399 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:11,400 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=133.7 K 2024-12-03T15:22:11,400 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,400 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4] 2024-12-03T15:22:11,401 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:11,402 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b9aeb6c3b4b48d8a75cfb3b9e86c6a1, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:11,403 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f7f38663cab4a3b85165438a13cd39c, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733239329620 2024-12-03T15:22:11,403 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ddeff555fa84571a5c948437b4eceb4, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:11,417 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,418 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 
after considering 1 permutations with 1 in ratio 2024-12-03T15:22:11,418 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:11,418 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,418 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f20548bcf15d45cd893a7c3565999e7e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.0 K 2024-12-03T15:22:11,422 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f20548bcf15d45cd893a7c3565999e7e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:11,423 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 887b7c067b31493db48cb98a91fcc080, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733239329620 2024-12-03T15:22:11,425 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 788ebe9a0b2d43eca0c882a0d2ea2473, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:11,435 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120362afe6e4f9f24cf3862894d992b3feb3_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,437 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120362afe6e4f9f24cf3862894d992b3feb3_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,438 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120362afe6e4f9f24cf3862894d992b3feb3_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,443 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#340 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:11,444 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/c270d3d12cd1446893093f4f7945b021 is 50, key is test_row_0/B:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,481 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-03T15:22:11,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,482 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:11,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:11,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742232_1408 (size=4469) 2024-12-03T15:22:11,486 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#339 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:11,486 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/610f5ed736e5470db8bac1ebb91e6a2e is 175, key is test_row_0/A:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742233_1409 (size=12663) 2024-12-03T15:22:11,521 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/c270d3d12cd1446893093f4f7945b021 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c270d3d12cd1446893093f4f7945b021 2024-12-03T15:22:11,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cf87486ef3d14da98e7159ae8b5304aa_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239331206/Put/seqid=0 2024-12-03T15:22:11,535 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into c270d3d12cd1446893093f4f7945b021(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
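The throttle.PressureAwareThroughputController lines above report each compaction's average write rate together with a shared total limit (50.00 MB/second here) and the number of active operations still counted against it; a writer that runs ahead of its allowance is put to sleep, which is why the controller also logs how many times and for how long it slept. The following plain-Java sketch illustrates that idea under the simplifying assumption that the total limit is split evenly across active writers; the class and method names are hypothetical and this is not HBase's actual implementation.

// Illustrative only: a shared write-throughput budget split across active
// operations, with sleeps when a writer runs ahead of its share.
public class SimpleThroughputThrottler {
    private final long totalLimitBytesPerSec;   // e.g. 50 MB/s, as in the log above
    private int activeOperations = 0;

    public SimpleThroughputThrottler(long totalLimitBytesPerSec) {
        this.totalLimitBytesPerSec = totalLimitBytesPerSec;
    }

    public synchronized void start()  { activeOperations++; }
    public synchronized void finish() { activeOperations = Math.max(0, activeOperations - 1); }

    private synchronized long perOperationLimit() {
        // Assumption: the shared limit is divided evenly among active writers.
        return totalLimitBytesPerSec / Math.max(1, activeOperations);
    }

    /** Called after writing {@code bytes} over {@code elapsedMs}; returns ms slept. */
    public long control(long bytes, long elapsedMs) throws InterruptedException {
        long limit = Math.max(1, perOperationLimit());
        long expectedMs = (bytes * 1000L) / limit;  // time the write should have taken at the limit
        long sleepMs = expectedMs - elapsedMs;
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);                  // throttle the over-budget writer
            return sleepMs;
        }
        return 0;                                   // under budget: "slept 0 time(s)"
    }
}

Both compactions in the log averaged well under the 50 MB/second limit (6.55 MB/second and 0.36 MB/second), so the controller never had to sleep them.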
2024-12-03T15:22:11,535 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,535 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239331398; duration=0sec 2024-12-03T15:22:11,535 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:11,535 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:11,535 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:11,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:11,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:11,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:11,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/178430428070433286ee1ecc428784f5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.0 K 2024-12-03T15:22:11,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 178430428070433286ee1ecc428784f5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1733239328409 2024-12-03T15:22:11,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 54c1b794af1341359d1e2efb9a322804, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1733239329620 2024-12-03T15:22:11,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 934b7e89bd184c99beb8262c1aa1033d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:11,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:11,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742234_1410 (size=31617) 2024-12-03T15:22:11,580 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:11,580 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8a1d0fbb45714e3c979b2a32a8450b31 is 50, key is test_row_0/C:col10/1733239330822/Put/seqid=0 2024-12-03T15:22:11,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742236_1412 (size=12663) 2024-12-03T15:22:11,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742235_1411 (size=12304) 2024-12-03T15:22:11,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:11,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239391596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,604 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/610f5ed736e5470db8bac1ebb91e6a2e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e 2024-12-03T15:22:11,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239391597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239391598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,630 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/8a1d0fbb45714e3c979b2a32a8450b31 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8a1d0fbb45714e3c979b2a32a8450b31 2024-12-03T15:22:11,634 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 610f5ed736e5470db8bac1ebb91e6a2e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
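The repeated RegionTooBusyException warnings above are raised by HRegion.checkResources(): while the region is busy flushing, its memstore has grown past the blocking threshold (logged as "Over memstore limit=512.0 K"; in general this threshold is the region flush size scaled by hbase.hregion.memstore.block.multiplier), so incoming Mutate calls are rejected until the flush catches up. Callers are expected to back off and retry; the HBase client's built-in retry logic (governed by hbase.client.retries.number and hbase.client.pause) usually handles this transparently, but the sketch below shows an explicit retry loop under the assumption that the exception surfaces to the caller. The retry count and backoff values are arbitrary illustration choices.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int maxAttempts = 5;      // arbitrary
            long backoffMs = 100L;    // arbitrary starting backoff
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);   // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e;      // region stayed blocked too long; give up
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;   // simple exponential backoff before retrying
                }
            }
        }
    }
}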
2024-12-03T15:22:11,634 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,634 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239331398; duration=0sec 2024-12-03T15:22:11,634 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:11,634 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:11,640 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 8a1d0fbb45714e3c979b2a32a8450b31(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:11,640 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:11,640 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239331398; duration=0sec 2024-12-03T15:22:11,640 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cf87486ef3d14da98e7159ae8b5304aa_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf87486ef3d14da98e7159ae8b5304aa_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:11,640 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:11,640 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:11,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/404d1cc9893e478f9cf6e930194be5fd, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:11,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/404d1cc9893e478f9cf6e930194be5fd is 175, key is test_row_0/A:col10/1733239331206/Put/seqid=0 2024-12-03T15:22:11,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742237_1413 (size=31105) 2024-12-03T15:22:11,686 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/404d1cc9893e478f9cf6e930194be5fd 2024-12-03T15:22:11,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/ec014c582a3b4be2994551366068fb32 is 50, key is test_row_0/B:col10/1733239331206/Put/seqid=0 2024-12-03T15:22:11,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239391718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239391711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239391722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742238_1414 (size=12151) 2024-12-03T15:22:11,751 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/ec014c582a3b4be2994551366068fb32 2024-12-03T15:22:11,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/5cb02d515bf249d58aca6354884fa24e is 50, key is test_row_0/C:col10/1733239331206/Put/seqid=0 2024-12-03T15:22:11,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742239_1415 (size=12151) 2024-12-03T15:22:11,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239391925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239391929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:11,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:11,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239391934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,230 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/5cb02d515bf249d58aca6354884fa24e 2024-12-03T15:22:12,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239392234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239392237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/404d1cc9893e478f9cf6e930194be5fd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd 2024-12-03T15:22:12,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239392246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,252 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd, entries=150, sequenceid=238, filesize=30.4 K 2024-12-03T15:22:12,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/ec014c582a3b4be2994551366068fb32 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32 2024-12-03T15:22:12,268 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32, entries=150, sequenceid=238, filesize=11.9 K 2024-12-03T15:22:12,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/5cb02d515bf249d58aca6354884fa24e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e 2024-12-03T15:22:12,274 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e, entries=150, sequenceid=238, filesize=11.9 K 2024-12-03T15:22:12,275 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for a2e5b6b6d57ac0725cc77df907fce083 in 794ms, sequenceid=238, 
compaction requested=false 2024-12-03T15:22:12,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:12,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:12,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-03T15:22:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-03T15:22:12,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-03T15:22:12,289 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3080 sec 2024-12-03T15:22:12,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 3.3190 sec 2024-12-03T15:22:12,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:12,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:12,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:12,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cf688dc70158408381f6a086e1141dd2_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742240_1416 (size=14844) 2024-12-03T15:22:12,345 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:12,350 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203cf688dc70158408381f6a086e1141dd2_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf688dc70158408381f6a086e1141dd2_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:12,360 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fc2247c01abd47e498e044977f93596c, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:12,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fc2247c01abd47e498e044977f93596c is 175, key is test_row_0/A:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:12,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742241_1417 (size=39799) 2024-12-03T15:22:12,385 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=259, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fc2247c01abd47e498e044977f93596c 2024-12-03T15:22:12,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239392391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/421e254ee4634c1c8b0dfe82eae853ce is 50, key is test_row_0/B:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:12,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239392395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742242_1418 (size=12201) 2024-12-03T15:22:12,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239392495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239392506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239392702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239392713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239392743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239392753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:12,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239392753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:12,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/421e254ee4634c1c8b0dfe82eae853ce 2024-12-03T15:22:12,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/162d7a307c724445a9c87c9c67bd78be is 50, key is test_row_0/C:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:12,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742243_1419 (size=12201) 2024-12-03T15:22:12,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/162d7a307c724445a9c87c9c67bd78be 2024-12-03T15:22:12,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fc2247c01abd47e498e044977f93596c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c 2024-12-03T15:22:12,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c, entries=200, sequenceid=259, filesize=38.9 K 2024-12-03T15:22:12,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/421e254ee4634c1c8b0dfe82eae853ce as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce 2024-12-03T15:22:12,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce, entries=150, sequenceid=259, filesize=11.9 K 2024-12-03T15:22:12,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/162d7a307c724445a9c87c9c67bd78be as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be 2024-12-03T15:22:12,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be, entries=150, sequenceid=259, filesize=11.9 K 2024-12-03T15:22:12,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for a2e5b6b6d57ac0725cc77df907fce083 in 631ms, sequenceid=259, compaction requested=true 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:12,946 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:22:12,946 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:12,947 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:12,947 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:12,947 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:12,948 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c270d3d12cd1446893093f4f7945b021, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.1 K 2024-12-03T15:22:12,948 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102521 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:12,948 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c270d3d12cd1446893093f4f7945b021, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:12,948 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:12,948 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
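Annotation: at this point the long-compactions thread has selected all three B-family store files (roughly 36 K in total) of region a2e5b6b6d57ac0725cc77df907fce083 for a minor compaction, while the short-compactions thread does the same for family A. For comparison only, the same kind of compaction can be requested explicitly through the standard HBase client Admin API; the sketch below is illustrative, not part of this test (table and family names are taken from this log, the connection setup is assumed to come from an hbase-site.xml on the classpath).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to compact only the B family of TestAcidGuarantees,
      // the same store the flusher queued for compaction in the log above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("B"));
    }
  }
}
```

Admin.compact only queues the request; as the CompactSplit entries above show, the region server still decides when the compaction actually runs.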
2024-12-03T15:22:12,948 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=100.1 K 2024-12-03T15:22:12,948 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:12,948 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c] 2024-12-03T15:22:12,948 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ec014c582a3b4be2994551366068fb32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733239331206 2024-12-03T15:22:12,949 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 610f5ed736e5470db8bac1ebb91e6a2e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:12,949 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 421e254ee4634c1c8b0dfe82eae853ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:12,949 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 404d1cc9893e478f9cf6e930194be5fd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733239331206 2024-12-03T15:22:12,951 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc2247c01abd47e498e044977f93596c, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:12,964 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#348 average 
throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:12,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/53e3d9d1f6e94e7ca6d52060944fdd6e is 50, key is test_row_0/B:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:12,965 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:12,967 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203690deec813bb44d088c1f7b69c2a6173_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:12,968 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203690deec813bb44d088c1f7b69c2a6173_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:12,969 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203690deec813bb44d088c1f7b69c2a6173_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:12,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742244_1420 (size=12815) 2024-12-03T15:22:13,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742245_1421 (size=4469) 2024-12-03T15:22:13,015 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#349 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:13,016 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fb8c8fc4a9884a0e84511a9dced7821e is 175, key is test_row_0/A:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:13,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:13,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-03T15:22:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:13,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742246_1422 (size=31769) 2024-12-03T15:22:13,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412032d069c51decd4c55b615e830c00850cd_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239332380/Put/seqid=0 2024-12-03T15:22:13,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742247_1423 (size=14994) 2024-12-03T15:22:13,077 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:13,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-03T15:22:13,091 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-03T15:22:13,097 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412032d069c51decd4c55b615e830c00850cd_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032d069c51decd4c55b615e830c00850cd_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:13,099 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:13,100 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/bbbe36ffadc140f2a96d184b1791ffaf, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:13,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/bbbe36ffadc140f2a96d184b1791ffaf is 175, key is test_row_0/A:col10/1733239332380/Put/seqid=0 2024-12-03T15:22:13,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-03T15:22:13,103 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:13,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:13,103 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:13,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:13,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239393098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239393099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742248_1424 (size=39949) 2024-12-03T15:22:13,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=279, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/bbbe36ffadc140f2a96d184b1791ffaf 2024-12-03T15:22:13,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/01bb4a9e60d2423bb4fd52106c8d8ae5 is 50, key is test_row_0/B:col10/1733239332380/Put/seqid=0 2024-12-03T15:22:13,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:13,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742249_1425 (size=12301) 2024-12-03T15:22:13,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239393211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239393212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,254 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:13,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
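Annotation: the recurring RegionTooBusyException above is thrown by HRegion.checkResources once the region's memstore passes its blocking size, which is the configured flush size multiplied by the block multiplier; the 512 K limit in this log presumably reflects a deliberately tiny test setting. A minimal sketch of the two standard configuration keys involved, with illustrative (non-test) values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes (128 MB here, for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected ("Over memstore limit") once the memstore grows past
    // flush.size * multiplier, the threshold reported in the exceptions above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + blocking + " bytes");
  }
}
```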
2024-12-03T15:22:13,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:13,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:13,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,377 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/53e3d9d1f6e94e7ca6d52060944fdd6e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/53e3d9d1f6e94e7ca6d52060944fdd6e 2024-12-03T15:22:13,388 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into 53e3d9d1f6e94e7ca6d52060944fdd6e(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
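Annotation: the B-family compaction has just completed, rewriting three store files of roughly 36 K (12.4 K + 11.9 K + 11.9 K) into a single 12.5 K file, presumably because the inputs held overlapping versions of the same small set of test rows. A quick, illustrative way to confirm the rows remain readable after these flushes and compactions is a client-side scan of the family; the sketch below assumes default connection settings, with table and family names taken from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAfterCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Scan scan = new Scan().addFamily(Bytes.toBytes("B")); // only the freshly compacted B family
      int rows = 0;
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          rows++; // each Result is one row still served from the compacted store
        }
      }
      System.out.println("rows readable in B after compaction: " + rows);
    }
  }
}
```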
2024-12-03T15:22:13,388 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:13,388 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239332946; duration=0sec 2024-12-03T15:22:13,388 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:13,388 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:13,388 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:13,395 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:13,395 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:13,395 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,395 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8a1d0fbb45714e3c979b2a32a8450b31, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.1 K 2024-12-03T15:22:13,399 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a1d0fbb45714e3c979b2a32a8450b31, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1733239330822 2024-12-03T15:22:13,399 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cb02d515bf249d58aca6354884fa24e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733239331206 2024-12-03T15:22:13,402 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 162d7a307c724445a9c87c9c67bd78be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:13,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=100 2024-12-03T15:22:13,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:13,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:13,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
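Annotation: the FlushRegionCallable dispatched for pid=101 keeps failing with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still running its own flush of the region; the master records the remote failure and re-dispatches the callable until it can proceed. For reference, the table-level flush that started this procedure chain (pid=100) corresponds to what a client requests through the Admin API; a minimal sketch, with the table name from the log and everything else assumed:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests the FLUSH table procedure seen in the master log above
      // (FlushTableProcedure -> FlushRegionProcedure per region).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```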
2024-12-03T15:22:13,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239393426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239393426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,450 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#352 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:13,451 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/dfb9ef32b4ff4809a2fac1e19aa7be2e is 50, key is test_row_0/C:col10/1733239331595/Put/seqid=0 2024-12-03T15:22:13,456 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/fb8c8fc4a9884a0e84511a9dced7821e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e 2024-12-03T15:22:13,478 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into fb8c8fc4a9884a0e84511a9dced7821e(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:13,478 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:13,478 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239332946; duration=0sec 2024-12-03T15:22:13,478 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:13,478 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:13,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742250_1426 (size=12815) 2024-12-03T15:22:13,573 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,574 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:13,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:13,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
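Annotation: alongside the compactions, the region keeps pushing writes back with RegionTooBusyException until the in-flight flush frees memstore space. The HBase client normally retries such calls internally, but an explicit retry loop with backoff makes the behaviour visible; the sketch below is illustrative only (row, family and qualifier echo the test keys in this log, the retry policy and value are assumed, and the exception may also surface wrapped in a retries-exhausted exception).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put); // may be rejected while the memstore is over its blocking limit
          break;
        } catch (IOException e) { // RegionTooBusyException is an IOException
          if (attempt >= 5) {
            throw e; // give up after a few attempts
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2; // simple exponential backoff before retrying the write
        }
      }
    }
  }
}
```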
2024-12-03T15:22:13,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/01bb4a9e60d2423bb4fd52106c8d8ae5 2024-12-03T15:22:13,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/6588755b8b8e416da7409647feeb0e4a is 50, key is test_row_0/C:col10/1733239332380/Put/seqid=0 2024-12-03T15:22:13,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742251_1427 (size=12301) 2024-12-03T15:22:13,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/6588755b8b8e416da7409647feeb0e4a 2024-12-03T15:22:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:13,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/bbbe36ffadc140f2a96d184b1791ffaf as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf 2024-12-03T15:22:13,726 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf, entries=200, sequenceid=279, filesize=39.0 K 2024-12-03T15:22:13,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/01bb4a9e60d2423bb4fd52106c8d8ae5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5 2024-12-03T15:22:13,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:13,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:13,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:13,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239393729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239393731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,737 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5, entries=150, sequenceid=279, filesize=12.0 K 2024-12-03T15:22:13,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/6588755b8b8e416da7409647feeb0e4a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a 2024-12-03T15:22:13,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a, entries=150, sequenceid=279, filesize=12.0 K 2024-12-03T15:22:13,747 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for a2e5b6b6d57ac0725cc77df907fce083 in 728ms, sequenceid=279, compaction requested=false 2024-12-03T15:22:13,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:13,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:13,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:13,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412035ae8ca463e064a8b8541cdc068953012_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:13,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239393804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239393812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239393814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742252_1428 (size=12454) 2024-12-03T15:22:13,902 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,904 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/dfb9ef32b4ff4809a2fac1e19aa7be2e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/dfb9ef32b4ff4809a2fac1e19aa7be2e 2024-12-03T15:22:13,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:13,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:13,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:13,906 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:13,912 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into dfb9ef32b4ff4809a2fac1e19aa7be2e(size=12.5 K), total size for store is 24.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:13,912 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:13,912 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239332946; duration=0sec 2024-12-03T15:22:13,912 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:13,912 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:13,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239393916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239393916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:13,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239393918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239394123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239394125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239394125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:14,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,237 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:14,241 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412035ae8ca463e064a8b8541cdc068953012_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035ae8ca463e064a8b8541cdc068953012_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:14,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239394240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,244 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5320f46382644856956387d1ece44bd0, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5320f46382644856956387d1ece44bd0 is 175, key is test_row_0/A:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:14,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239394242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742253_1429 (size=31255) 2024-12-03T15:22:14,249 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=300, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5320f46382644856956387d1ece44bd0 2024-12-03T15:22:14,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3a6d0400b93c4420814e1031f8a1a061 is 50, key is test_row_0/B:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:14,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742254_1430 (size=12301) 2024-12-03T15:22:14,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:14,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239394431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239394431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:14,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239394435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,530 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3a6d0400b93c4420814e1031f8a1a061 2024-12-03T15:22:14,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/45a6f67af1884f1eb91c756b0151154b is 50, key is test_row_0/C:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:14,689 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:14,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742255_1431 (size=12301) 2024-12-03T15:22:14,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/45a6f67af1884f1eb91c756b0151154b 2024-12-03T15:22:14,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5320f46382644856956387d1ece44bd0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0 2024-12-03T15:22:14,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0, entries=150, sequenceid=300, filesize=30.5 K 2024-12-03T15:22:14,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/3a6d0400b93c4420814e1031f8a1a061 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061 2024-12-03T15:22:14,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061, entries=150, sequenceid=300, filesize=12.0 K 2024-12-03T15:22:14,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/45a6f67af1884f1eb91c756b0151154b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b 2024-12-03T15:22:14,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b, entries=150, sequenceid=300, filesize=12.0 K 2024-12-03T15:22:14,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for a2e5b6b6d57ac0725cc77df907fce083 in 1044ms, sequenceid=300, compaction requested=true 2024-12-03T15:22:14,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:14,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:14,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:14,809 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:14,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:14,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:14,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:14,810 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:22:14,812 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102973 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:14,812 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:14,812 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:14,812 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=100.6 K 2024-12-03T15:22:14,812 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,812 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0] 2024-12-03T15:22:14,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb8c8fc4a9884a0e84511a9dced7821e, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:14,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbbe36ffadc140f2a96d184b1791ffaf, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733239332355 2024-12-03T15:22:14,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5320f46382644856956387d1ece44bd0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:14,813 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:14,825 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:14,825 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:14,825 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,825 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/53e3d9d1f6e94e7ca6d52060944fdd6e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.5 K 2024-12-03T15:22:14,826 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,828 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203c88bafe9bfd744f0bfd4a7f7b48d332b_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,830 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 53e3d9d1f6e94e7ca6d52060944fdd6e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:14,830 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 01bb4a9e60d2423bb4fd52106c8d8ae5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733239332380 2024-12-03T15:22:14,830 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203c88bafe9bfd744f0bfd4a7f7b48d332b_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,830 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c88bafe9bfd744f0bfd4a7f7b48d332b_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,830 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a6d0400b93c4420814e1031f8a1a061, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:14,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:14,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-03T15:22:14,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,859 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:14,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:14,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742256_1432 (size=4469) 2024-12-03T15:22:14,877 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#358 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:14,877 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/edc1b8889b1e45cd86ed3aff09d5f38b is 50, key is test_row_0/B:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:14,894 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#357 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:14,895 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2289c767b13f49b89717a175c7f85204 is 175, key is test_row_0/A:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:14,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412039ce32fe20a0547c0b4f8f77ca9ea9f29_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239333802/Put/seqid=0 2024-12-03T15:22:14,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742257_1433 (size=13017) 2024-12-03T15:22:14,940 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/edc1b8889b1e45cd86ed3aff09d5f38b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/edc1b8889b1e45cd86ed3aff09d5f38b 2024-12-03T15:22:14,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742258_1434 (size=31971) 2024-12-03T15:22:14,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:14,949 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:14,961 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into edc1b8889b1e45cd86ed3aff09d5f38b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:14,961 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:14,961 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239334810; duration=0sec 2024-12-03T15:22:14,961 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:14,962 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:14,962 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:14,963 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:14,963 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:14,963 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:14,963 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/dfb9ef32b4ff4809a2fac1e19aa7be2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.5 K 2024-12-03T15:22:14,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting dfb9ef32b4ff4809a2fac1e19aa7be2e, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1733239331573 2024-12-03T15:22:14,964 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6588755b8b8e416da7409647feeb0e4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1733239332380 2024-12-03T15:22:14,965 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/2289c767b13f49b89717a175c7f85204 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204 2024-12-03T15:22:14,965 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 45a6f67af1884f1eb91c756b0151154b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:14,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742259_1435 (size=12454) 2024-12-03T15:22:14,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:14,978 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 2289c767b13f49b89717a175c7f85204(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:14,978 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:14,978 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239334809; duration=0sec 2024-12-03T15:22:14,978 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:14,979 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:14,982 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412039ce32fe20a0547c0b4f8f77ca9ea9f29_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412039ce32fe20a0547c0b4f8f77ca9ea9f29_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:14,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/1e02bed791574505962cc63e03a185b5, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:14,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/1e02bed791574505962cc63e03a185b5 is 175, key is test_row_0/A:col10/1733239333802/Put/seqid=0 2024-12-03T15:22:14,988 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:14,988 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/032ceb9448b3408b8145add93b1e2435 is 50, key is test_row_0/C:col10/1733239333765/Put/seqid=0 2024-12-03T15:22:15,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239394998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239395000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239395001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742260_1436 (size=31255) 2024-12-03T15:22:15,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742261_1437 (size=13017) 2024-12-03T15:22:15,041 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/032ceb9448b3408b8145add93b1e2435 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/032ceb9448b3408b8145add93b1e2435 2024-12-03T15:22:15,049 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 032ceb9448b3408b8145add93b1e2435(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:15,049 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:15,049 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239334810; duration=0sec 2024-12-03T15:22:15,049 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:15,049 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239395108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239395108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239395108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:15,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239395244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239395254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239395311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239395312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239395312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,421 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/1e02bed791574505962cc63e03a185b5 2024-12-03T15:22:15,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/c385a42a9f9742fe8c2ef3123458dc6d is 50, key is test_row_0/B:col10/1733239333802/Put/seqid=0 2024-12-03T15:22:15,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742262_1438 (size=12301) 2024-12-03T15:22:15,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239395617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239395617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239395618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:15,835 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/c385a42a9f9742fe8c2ef3123458dc6d 2024-12-03T15:22:15,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/eefa2cfdd01942cbaf481a3bcbce1d6a is 50, key is test_row_0/C:col10/1733239333802/Put/seqid=0 2024-12-03T15:22:15,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742263_1439 (size=12301) 2024-12-03T15:22:16,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:16,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239396124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:16,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:16,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239396131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:16,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:16,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239396131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:16,259 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/eefa2cfdd01942cbaf481a3bcbce1d6a 2024-12-03T15:22:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/1e02bed791574505962cc63e03a185b5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5 2024-12-03T15:22:16,269 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5, entries=150, sequenceid=318, filesize=30.5 K 2024-12-03T15:22:16,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/c385a42a9f9742fe8c2ef3123458dc6d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d 2024-12-03T15:22:16,274 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d, entries=150, sequenceid=318, filesize=12.0 K 2024-12-03T15:22:16,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/eefa2cfdd01942cbaf481a3bcbce1d6a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a 2024-12-03T15:22:16,278 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a, entries=150, sequenceid=318, filesize=12.0 K 2024-12-03T15:22:16,279 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for a2e5b6b6d57ac0725cc77df907fce083 in 1420ms, sequenceid=318, compaction requested=false 2024-12-03T15:22:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:16,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-03T15:22:16,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-03T15:22:16,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-03T15:22:16,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1770 sec 2024-12-03T15:22:16,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 3.1830 sec 2024-12-03T15:22:17,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:17,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:17,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:17,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120324444d06222146469fc9300b02a6a889_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:17,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239397160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239397163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742264_1440 (size=12454) 2024-12-03T15:22:17,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239397169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-03T15:22:17,218 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-03T15:22:17,220 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:17,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-03T15:22:17,222 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:17,222 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:17,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:17,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:17,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33608 deadline: 1733239397256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,262 DEBUG [Thread-1570 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:17,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33638 deadline: 1733239397264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,270 DEBUG [Thread-1576 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4172 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:17,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239397269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239397271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239397274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:17,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:17,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:17,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,377 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:17,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239397477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239397480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239397481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:17,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:17,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:17,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:17,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,573 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:17,586 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120324444d06222146469fc9300b02a6a889_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120324444d06222146469fc9300b02a6a889_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:17,591 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/73a87fca83d74781824e212c013b087f, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:17,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/73a87fca83d74781824e212c013b087f is 175, key is test_row_0/A:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:17,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742265_1441 (size=31255) 2024-12-03T15:22:17,615 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/73a87fca83d74781824e212c013b087f 2024-12-03T15:22:17,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/62c90f9072c74638b7a99d84b60d2ab3 is 50, key is test_row_0/B:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:17,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742266_1442 (size=12301) 2024-12-03T15:22:17,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/62c90f9072c74638b7a99d84b60d2ab3 2024-12-03T15:22:17,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/921334b55e324d40a35f1cc4c5eeee64 is 50, key is test_row_0/C:col10/1733239334996/Put/seqid=0 
2024-12-03T15:22:17,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742267_1443 (size=12301) 2024-12-03T15:22:17,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:17,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:17,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239397786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239397787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239397787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:17,838 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:17,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:17,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,839 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,990 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:17,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:17,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:17,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:17,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:18,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/921334b55e324d40a35f1cc4c5eeee64 2024-12-03T15:22:18,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/73a87fca83d74781824e212c013b087f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f 2024-12-03T15:22:18,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f, entries=150, sequenceid=342, filesize=30.5 K 2024-12-03T15:22:18,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/62c90f9072c74638b7a99d84b60d2ab3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3 2024-12-03T15:22:18,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3, entries=150, sequenceid=342, filesize=12.0 K 2024-12-03T15:22:18,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/921334b55e324d40a35f1cc4c5eeee64 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64 2024-12-03T15:22:18,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64, entries=150, sequenceid=342, filesize=12.0 K 2024-12-03T15:22:18,091 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for a2e5b6b6d57ac0725cc77df907fce083 in 952ms, sequenceid=342, compaction requested=true 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:18,091 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:18,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:18,091 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:18,095 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:18,095 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:18,095 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:18,095 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:18,095 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:18,095 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:18,096 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=92.3 K 2024-12-03T15:22:18,096 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/edc1b8889b1e45cd86ed3aff09d5f38b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.7 K 2024-12-03T15:22:18,096 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:18,096 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f] 2024-12-03T15:22:18,096 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting edc1b8889b1e45cd86ed3aff09d5f38b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:18,096 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c385a42a9f9742fe8c2ef3123458dc6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733239333802 2024-12-03T15:22:18,096 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2289c767b13f49b89717a175c7f85204, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:18,097 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 62c90f9072c74638b7a99d84b60d2ab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:18,097 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e02bed791574505962cc63e03a185b5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733239333802 2024-12-03T15:22:18,097 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73a87fca83d74781824e212c013b087f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:18,105 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:18,105 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/b129afb232754f0f87336c411945e858 is 50, key is test_row_0/B:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:18,114 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,131 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412033e6f403672e14f5a8422d1b21f6adee7_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,133 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412033e6f403672e14f5a8422d1b21f6adee7_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,133 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033e6f403672e14f5a8422d1b21f6adee7_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742268_1444 (size=13119) 2024-12-03T15:22:18,145 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-03T15:22:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:18,147 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-03T15:22:18,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:18,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:18,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:18,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742269_1445 (size=4469) 2024-12-03T15:22:18,151 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#367 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:18,152 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5b1b833ca4f7490b90faedd6ea85c478 is 175, key is test_row_0/A:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:18,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120354c25e54f4664a7a8418125b1ff60788_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239337158/Put/seqid=0 2024-12-03T15:22:18,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742270_1446 (size=32073) 2024-12-03T15:22:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742271_1447 (size=12454) 2024-12-03T15:22:18,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:18,206 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120354c25e54f4664a7a8418125b1ff60788_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120354c25e54f4664a7a8418125b1ff60788_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:18,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/55160d2a5a674bf6913190d44a06a56b, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/55160d2a5a674bf6913190d44a06a56b is 175, key is test_row_0/A:col10/1733239337158/Put/seqid=0 2024-12-03T15:22:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742272_1448 (size=31255) 2024-12-03T15:22:18,230 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=357, memsize=26.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/55160d2a5a674bf6913190d44a06a56b 2024-12-03T15:22:18,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/e276f91dd24d45839a9e79acbf4a7068 is 50, key is test_row_0/B:col10/1733239337158/Put/seqid=0 2024-12-03T15:22:18,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742273_1449 (size=12301) 2024-12-03T15:22:18,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:18,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:18,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:18,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239398340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239398342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239398342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239398462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239398462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239398462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,547 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/b129afb232754f0f87336c411945e858 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b129afb232754f0f87336c411945e858 2024-12-03T15:22:18,555 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into b129afb232754f0f87336c411945e858(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:18,555 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:18,555 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239338091; duration=0sec 2024-12-03T15:22:18,555 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:18,555 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:18,555 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:18,556 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:18,556 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:18,556 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:18,556 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/032ceb9448b3408b8145add93b1e2435, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.7 K 2024-12-03T15:22:18,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 032ceb9448b3408b8145add93b1e2435, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1733239333095 2024-12-03T15:22:18,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting eefa2cfdd01942cbaf481a3bcbce1d6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733239333802 2024-12-03T15:22:18,557 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 921334b55e324d40a35f1cc4c5eeee64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:18,563 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
a2e5b6b6d57ac0725cc77df907fce083#C#compaction#370 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:18,564 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/0a8d0a0386d34c1785bd56ae0f9455cf is 50, key is test_row_0/C:col10/1733239334996/Put/seqid=0 2024-12-03T15:22:18,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742274_1450 (size=13119) 2024-12-03T15:22:18,579 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/0a8d0a0386d34c1785bd56ae0f9455cf as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0a8d0a0386d34c1785bd56ae0f9455cf 2024-12-03T15:22:18,587 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into 0a8d0a0386d34c1785bd56ae0f9455cf(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:18,587 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:18,587 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239338091; duration=0sec 2024-12-03T15:22:18,587 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:18,587 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:18,595 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/5b1b833ca4f7490b90faedd6ea85c478 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478 2024-12-03T15:22:18,598 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into 5b1b833ca4f7490b90faedd6ea85c478(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:18,598 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:18,598 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239338091; duration=0sec 2024-12-03T15:22:18,598 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:18,598 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:18,660 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/e276f91dd24d45839a9e79acbf4a7068 2024-12-03T15:22:18,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239398664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239398666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239398667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:18,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/0de55cfca3b14319932396975ab0c4af is 50, key is test_row_0/C:col10/1733239337158/Put/seqid=0 2024-12-03T15:22:18,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742275_1451 (size=12301) 2024-12-03T15:22:18,681 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/0de55cfca3b14319932396975ab0c4af 2024-12-03T15:22:18,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/55160d2a5a674bf6913190d44a06a56b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b 2024-12-03T15:22:18,708 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b, entries=150, sequenceid=357, filesize=30.5 K 2024-12-03T15:22:18,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/e276f91dd24d45839a9e79acbf4a7068 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068 2024-12-03T15:22:18,717 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 
{event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068, entries=150, sequenceid=357, filesize=12.0 K 2024-12-03T15:22:18,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/0de55cfca3b14319932396975ab0c4af as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af 2024-12-03T15:22:18,722 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af, entries=150, sequenceid=357, filesize=12.0 K 2024-12-03T15:22:18,723 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for a2e5b6b6d57ac0725cc77df907fce083 in 575ms, sequenceid=357, compaction requested=false 2024-12-03T15:22:18,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:18,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:18,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-03T15:22:18,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-03T15:22:18,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-03T15:22:18,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5020 sec 2024-12-03T15:22:18,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.5050 sec 2024-12-03T15:22:18,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:18,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-03T15:22:18,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:18,975 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:18,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f20d71d808224421b93c19276a001c10_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:18,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742276_1452 (size=14994) 2024-12-03T15:22:18,990 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:18,994 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f20d71d808224421b93c19276a001c10_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f20d71d808224421b93c19276a001c10_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:18,995 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ef9ab8f29fe6418ca1e15d640b4ab3c8, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:18,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 is 175, key is test_row_0/A:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:18,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:18,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239398991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742277_1453 (size=39949) 2024-12-03T15:22:19,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239398993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239399002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239399097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239399102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239399106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239399302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239399307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239399311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-03T15:22:19,327 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-03T15:22:19,328 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-03T15:22:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:19,329 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:19,330 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:19,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:19,403 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 2024-12-03T15:22:19,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/91683272b18b4f74b9f95b0298e6b5b8 is 50, key is test_row_0/B:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:19,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742278_1454 (size=12301) 2024-12-03T15:22:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:19,482 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:19,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:19,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239399609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239399614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:19,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239399618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:19,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:19,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:19,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:19,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:19,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:19,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:19,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/91683272b18b4f74b9f95b0298e6b5b8 2024-12-03T15:22:19,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/1ea843ff611e45e999a1abd242ebdc36 is 50, key is test_row_0/C:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:19,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742279_1455 (size=12301) 2024-12-03T15:22:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:19,949 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:19,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:19,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:19,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:19,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:19,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:19,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:20,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:20,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:20,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33596 deadline: 1733239400119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:20,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33568 deadline: 1733239400123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:20,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33584 deadline: 1733239400127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:20,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:20,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:20,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/1ea843ff611e45e999a1abd242ebdc36 2024-12-03T15:22:20,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 2024-12-03T15:22:20,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8, entries=200, sequenceid=382, filesize=39.0 K 2024-12-03T15:22:20,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/91683272b18b4f74b9f95b0298e6b5b8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8 2024-12-03T15:22:20,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8, entries=150, sequenceid=382, filesize=12.0 K 2024-12-03T15:22:20,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/1ea843ff611e45e999a1abd242ebdc36 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36 2024-12-03T15:22:20,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36, entries=150, sequenceid=382, filesize=12.0 K 2024-12-03T15:22:20,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for a2e5b6b6d57ac0725cc77df907fce083 in 1326ms, sequenceid=382, compaction requested=true 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:20,300 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:20,300 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2e5b6b6d57ac0725cc77df907fce083:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/A is initiating minor compaction (all files) 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/B is initiating minor compaction (all files) 2024-12-03T15:22:20,307 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/A in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,307 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/B in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
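The repeated RegionTooBusyException WARNs above come from HRegion.checkResources: while the flush was still running, the region's memstore exceeded its blocking limit of 512.0 K, so incoming Mutate calls were rejected until the ~127.47 KB flush that completes just above finished. The blocking limit is the memstore flush size multiplied by the block multiplier; the sketch below only reconstructs the observed 512 K figure. The property names are standard HBase keys, but the specific values are assumptions chosen to match the log (the AcidGuarantees test presumably shrinks the flush size well below the 128 MB default).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values picked to reproduce the 512.0 K limit seen in the log;
    // the normal defaults are 128 MB for the flush size and 4 for the multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

    // HRegion.checkResources rejects writes with RegionTooBusyException once the
    // region's memstore grows past flushSize * multiplier.
    System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes"); // 524288 = 512 K
  }
}
```

Clients normally ride this out on their own, since the rejection is a retriable exception and the HBase client retries it up to its configured retry budget; the rejections stop here once the flush above completes and the memstore shrinks back below the limit.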
2024-12-03T15:22:20,307 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=100.9 K 2024-12-03T15:22:20,307 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b129afb232754f0f87336c411945e858, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.8 K 2024-12-03T15:22:20,307 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8] 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b129afb232754f0f87336c411945e858, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:20,307 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b1b833ca4f7490b90faedd6ea85c478, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:20,308 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55160d2a5a674bf6913190d44a06a56b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733239337158 2024-12-03T15:22:20,308 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e276f91dd24d45839a9e79acbf4a7068, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733239337158 2024-12-03T15:22:20,308 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef9ab8f29fe6418ca1e15d640b4ab3c8, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733239338332 2024-12-03T15:22:20,308 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 91683272b18b4f74b9f95b0298e6b5b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733239338332 2024-12-03T15:22:20,325 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:20,325 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:20,326 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/1be784842a944581a1fbe01d7bc04ba3 is 50, key is test_row_0/B:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:20,328 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203a0431d9592ed4d1290d7402bf5778637_a2e5b6b6d57ac0725cc77df907fce083 store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:20,329 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203a0431d9592ed4d1290d7402bf5778637_a2e5b6b6d57ac0725cc77df907fce083, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:20,329 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203a0431d9592ed4d1290d7402bf5778637_a2e5b6b6d57ac0725cc77df907fce083 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:20,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742280_1456 (size=13221) 2024-12-03T15:22:20,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742281_1457 (size=4469) 2024-12-03T15:22:20,356 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#A#compaction#376 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:20,356 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ab8ddefadf71414f90c928f5a10200ad is 175, key is test_row_0/A:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:20,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742282_1458 (size=32175) 2024-12-03T15:22:20,374 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/ab8ddefadf71414f90c928f5a10200ad as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ab8ddefadf71414f90c928f5a10200ad 2024-12-03T15:22:20,378 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/A of a2e5b6b6d57ac0725cc77df907fce083 into ab8ddefadf71414f90c928f5a10200ad(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:20,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:20,378 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/A, priority=13, startTime=1733239340300; duration=0sec 2024-12-03T15:22:20,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:20,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:A 2024-12-03T15:22:20,379 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:20,380 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:20,380 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): a2e5b6b6d57ac0725cc77df907fce083/C is initiating minor compaction (all files) 2024-12-03T15:22:20,380 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of a2e5b6b6d57ac0725cc77df907fce083/C in TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
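Family A is served by the MOB code path here (DefaultMobStoreCompactor above, DefaultMobStoreFlusher later in this log), and the freshly created MOB writer is aborted because none of the compacted cells cross the MOB threshold, so the compaction output stays in a regular HFile. For orientation, a minimal sketch of how a MOB-enabled family is declared follows; the 100 KB threshold is purely illustrative and is not taken from this test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Cells in family A larger than the threshold are written to separate MOB files under
    // the mobdir seen in the log; smaller cells stay in ordinary HFiles, which is why this
    // particular compaction aborts its MOB writer ("there are no MOB cells").
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024)  // illustrative threshold, not read from the log
            .build())
        .build();
    System.out.println(desc);
  }
}
```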
2024-12-03T15:22:20,380 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0a8d0a0386d34c1785bd56ae0f9455cf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp, totalSize=36.8 K 2024-12-03T15:22:20,381 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a8d0a0386d34c1785bd56ae0f9455cf, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733239334996 2024-12-03T15:22:20,382 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0de55cfca3b14319932396975ab0c4af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733239337158 2024-12-03T15:22:20,382 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ea843ff611e45e999a1abd242ebdc36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733239338332 2024-12-03T15:22:20,391 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2e5b6b6d57ac0725cc77df907fce083#C#compaction#377 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:20,391 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/a265bc7266934a81a016c9a458e88253 is 50, key is test_row_0/C:col10/1733239338332/Put/seqid=0 2024-12-03T15:22:20,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742283_1459 (size=13221) 2024-12-03T15:22:20,411 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:20,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-03T15:22:20,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
2024-12-03T15:22:20,411 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-03T15:22:20,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:20,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:20,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:20,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:20,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:20,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:20,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fff823e65de94cc1908c524f1786263d_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239338992/Put/seqid=0 2024-12-03T15:22:20,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:20,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742284_1460 (size=12454) 2024-12-03T15:22:20,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:20,446 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203fff823e65de94cc1908c524f1786263d_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fff823e65de94cc1908c524f1786263d_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:20,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/dca8f6291b3e422aaafc7229f3e2b187, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/dca8f6291b3e422aaafc7229f3e2b187 is 175, key is test_row_0/A:col10/1733239338992/Put/seqid=0 2024-12-03T15:22:20,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742285_1461 (size=31255) 2024-12-03T15:22:20,679 DEBUG [Thread-1585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:60989 2024-12-03T15:22:20,679 DEBUG [Thread-1579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:60989 2024-12-03T15:22:20,679 DEBUG [Thread-1585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:20,679 DEBUG [Thread-1579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:20,682 DEBUG [Thread-1587 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68035c67 to 127.0.0.1:60989 2024-12-03T15:22:20,682 DEBUG [Thread-1587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:20,682 DEBUG [Thread-1581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:60989 2024-12-03T15:22:20,683 DEBUG [Thread-1581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:20,683 DEBUG [Thread-1583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:60989 2024-12-03T15:22:20,683 DEBUG [Thread-1583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:20,752 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/1be784842a944581a1fbe01d7bc04ba3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/1be784842a944581a1fbe01d7bc04ba3 2024-12-03T15:22:20,757 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/B of a2e5b6b6d57ac0725cc77df907fce083 into 1be784842a944581a1fbe01d7bc04ba3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
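The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines are SortedCompactionPolicy reporting that each store has reached three HFiles, the usual minor-compaction trigger, while writes would only be blocked once a store accumulates sixteen files. The sketch below simply reads the two knobs involved; the key names are standard HBase properties, and the fallback values shown (3 and 16) are chosen to line up with the numbers in the log rather than being taken from the test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered
    // (matches the "3 eligible" selections above).
    int minFiles = conf.getInt("hbase.hstore.compactionThreshold", 3);
    // Store-file count at which writes to the region are blocked until compaction
    // catches up (matches the "16 blocking" figure above).
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("minor compaction threshold=" + minFiles
        + ", blocking store files=" + blocking);
  }
}
```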
2024-12-03T15:22:20,757 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:20,757 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/B, priority=13, startTime=1733239340300; duration=0sec 2024-12-03T15:22:20,758 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:20,758 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:B 2024-12-03T15:22:20,809 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/a265bc7266934a81a016c9a458e88253 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/a265bc7266934a81a016c9a458e88253 2024-12-03T15:22:20,813 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in a2e5b6b6d57ac0725cc77df907fce083/C of a2e5b6b6d57ac0725cc77df907fce083 into a265bc7266934a81a016c9a458e88253(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:20,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:20,813 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083., storeName=a2e5b6b6d57ac0725cc77df907fce083/C, priority=13, startTime=1733239340300; duration=0sec 2024-12-03T15:22:20,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:20,813 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2e5b6b6d57ac0725cc77df907fce083:C 2024-12-03T15:22:20,867 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/dca8f6291b3e422aaafc7229f3e2b187 2024-12-03T15:22:20,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/2f6f692183e44793abf2f84154fce102 is 50, key is test_row_0/B:col10/1733239338992/Put/seqid=0 2024-12-03T15:22:20,890 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742286_1462 (size=12301) 2024-12-03T15:22:20,896 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/2f6f692183e44793abf2f84154fce102 2024-12-03T15:22:20,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/405459c22aa6424292eca19cabc1dd76 is 50, key is test_row_0/C:col10/1733239338992/Put/seqid=0 2024-12-03T15:22:20,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742287_1463 (size=12301) 2024-12-03T15:22:21,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:21,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. as already flushing 2024-12-03T15:22:21,127 DEBUG [Thread-1572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:60989 2024-12-03T15:22:21,127 DEBUG [Thread-1572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,128 DEBUG [Thread-1568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:60989 2024-12-03T15:22:21,128 DEBUG [Thread-1568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,138 DEBUG [Thread-1574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:60989 2024-12-03T15:22:21,138 DEBUG [Thread-1574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,267 DEBUG [Thread-1570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:60989 2024-12-03T15:22:21,267 DEBUG [Thread-1570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,301 DEBUG [Thread-1576 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f94d721 to 127.0.0.1:60989 2024-12-03T15:22:21,301 DEBUG [Thread-1576 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,331 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/405459c22aa6424292eca19cabc1dd76 2024-12-03T15:22:21,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/dca8f6291b3e422aaafc7229f3e2b187 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/dca8f6291b3e422aaafc7229f3e2b187 2024-12-03T15:22:21,340 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/dca8f6291b3e422aaafc7229f3e2b187, entries=150, sequenceid=397, filesize=30.5 K 2024-12-03T15:22:21,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/2f6f692183e44793abf2f84154fce102 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/2f6f692183e44793abf2f84154fce102 2024-12-03T15:22:21,345 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/2f6f692183e44793abf2f84154fce102, entries=150, sequenceid=397, filesize=12.0 K 2024-12-03T15:22:21,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/405459c22aa6424292eca19cabc1dd76 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/405459c22aa6424292eca19cabc1dd76 2024-12-03T15:22:21,350 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/405459c22aa6424292eca19cabc1dd76, entries=150, sequenceid=397, filesize=12.0 K 2024-12-03T15:22:21,351 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=33.54 KB/34350 for a2e5b6b6d57ac0725cc77df907fce083 in 940ms, sequenceid=397, compaction requested=false 2024-12-03T15:22:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 
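pid=105 above is a per-region FlushRegionProcedure spawned by FlushTableProcedure pid=104; every earlier attempt failed with "Unable to complete flush ... as already flushing" and was re-dispatched by the master until the in-progress memstore flush completed, after which this attempt finishes and is reported successful immediately below. On the client side the whole exchange is driven by a single admin flush call, roughly as sketched here (the table name comes from the log; the rest is a plain illustration of the Admin API, not the test tool's own code).

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers a flush on the master side; the call returns once every region of the
      // table has flushed its memstores (pid=104/105 in the log above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```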
2024-12-03T15:22:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-03T15:22:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-03T15:22:21,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-03T15:22:21,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0220 sec 2024-12-03T15:22:21,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.0260 sec 2024-12-03T15:22:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-03T15:22:21,434 INFO [Thread-1578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1756 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5268 rows 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1729 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5187 rows 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1730 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5190 rows 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1717 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5151 rows 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1718 2024-12-03T15:22:21,434 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5154 rows 2024-12-03T15:22:21,434 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T15:22:21,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:60989 2024-12-03T15:22:21,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:21,437 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-12-03T15:22:21,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-03T15:22:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:21,444 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239341444"}]},"ts":"1733239341444"} 2024-12-03T15:22:21,445 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-03T15:22:21,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-03T15:22:21,447 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-03T15:22:21,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:22:21,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, UNASSIGN}] 2024-12-03T15:22:21,451 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, UNASSIGN 2024-12-03T15:22:21,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=108 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:21,452 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:22:21,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; CloseRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:21,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-03T15:22:21,604 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:21,604 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] handler.UnassignRegionHandler(124): Close a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:21,604 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:22:21,604 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1681): Closing a2e5b6b6d57ac0725cc77df907fce083, disabling compactions & flushes 2024-12-03T15:22:21,604 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:21,604 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:21,604 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. after waiting 0 ms 2024-12-03T15:22:21,604 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:21,605 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(2837): Flushing a2e5b6b6d57ac0725cc77df907fce083 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=A 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=B 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK a2e5b6b6d57ac0725cc77df907fce083, store=C 2024-12-03T15:22:21,605 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:21,627 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412032ee2d34868c8485f8326f04e8a2a0778_a2e5b6b6d57ac0725cc77df907fce083 is 50, key is test_row_0/A:col10/1733239341265/Put/seqid=0 2024-12-03T15:22:21,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742288_1464 (size=12454) 2024-12-03T15:22:21,645 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:21,649 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412032ee2d34868c8485f8326f04e8a2a0778_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032ee2d34868c8485f8326f04e8a2a0778_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:21,650 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/d7186173fec14218a92126ec311a0747, store: [table=TestAcidGuarantees family=A region=a2e5b6b6d57ac0725cc77df907fce083] 2024-12-03T15:22:21,650 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/d7186173fec14218a92126ec311a0747 is 175, key is test_row_0/A:col10/1733239341265/Put/seqid=0 2024-12-03T15:22:21,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742289_1465 (size=31255) 2024-12-03T15:22:21,670 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=407, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/d7186173fec14218a92126ec311a0747 2024-12-03T15:22:21,679 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/a7d40ddd9cb1464ca2f5c4097a8ad3de is 50, key is test_row_0/B:col10/1733239341265/Put/seqid=0 2024-12-03T15:22:21,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742290_1466 (size=12301) 2024-12-03T15:22:21,688 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/a7d40ddd9cb1464ca2f5c4097a8ad3de 2024-12-03T15:22:21,697 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d5a2b76943e4b23bcc1787f264fec86 is 50, key is test_row_0/C:col10/1733239341265/Put/seqid=0 2024-12-03T15:22:21,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742291_1467 (size=12301) 
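The HMobStore "FLUSH Renaming flushed file" entries above show the flusher first writing the MOB file under mobdir/.tmp and only then renaming it into mobdir/data, so readers never observe a half-written file. Below is a minimal sketch of that write-to-tmp-then-rename pattern using the plain Hadoop FileSystem API; the paths and file name are illustrative assumptions, not the exact layout HBase derives.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // picks up core-site.xml / hdfs-site.xml if present
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical locations mirroring the mobdir/.tmp -> mobdir/data move in the log.
    Path tmp  = new Path("/user/jenkins/test-data/mobdir/.tmp/example-mob-file");
    Path dest = new Path("/user/jenkins/test-data/mobdir/data/default/TestAcidGuarantees/A/example-mob-file");

    // 1. Write the new file under .tmp so no reader ever sees a partially written file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF("flushed cells would go here");
    }

    // 2. Publish it with a rename, which is atomic per directory on HDFS.
    fs.mkdirs(dest.getParent());
    if (!fs.rename(tmp, dest)) {
      throw new IOException("rename failed: " + tmp + " -> " + dest);
    }
  }
}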
2024-12-03T15:22:21,710 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d5a2b76943e4b23bcc1787f264fec86 2024-12-03T15:22:21,715 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/A/d7186173fec14218a92126ec311a0747 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/d7186173fec14218a92126ec311a0747 2024-12-03T15:22:21,718 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/d7186173fec14218a92126ec311a0747, entries=150, sequenceid=407, filesize=30.5 K 2024-12-03T15:22:21,718 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/B/a7d40ddd9cb1464ca2f5c4097a8ad3de as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/a7d40ddd9cb1464ca2f5c4097a8ad3de 2024-12-03T15:22:21,721 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/a7d40ddd9cb1464ca2f5c4097a8ad3de, entries=150, sequenceid=407, filesize=12.0 K 2024-12-03T15:22:21,722 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/.tmp/C/2d5a2b76943e4b23bcc1787f264fec86 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d5a2b76943e4b23bcc1787f264fec86 2024-12-03T15:22:21,725 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d5a2b76943e4b23bcc1787f264fec86, entries=150, sequenceid=407, filesize=12.0 K 2024-12-03T15:22:21,725 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for a2e5b6b6d57ac0725cc77df907fce083 in 121ms, sequenceid=407, compaction requested=true 
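The flush and commit steps just logged run because the region is being closed by the table disable requested at the top of this section (DisableTableProcedure pid=106). For context, here is a minimal client-side sketch of issuing that disable through the standard HBase Admin API, assuming an hbase-site.xml on the classpath; the synchronous disableTable call returns once the procedure has finished.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    TableName table = TableName.valueOf("TestAcidGuarantees");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // The master stores a DisableTableProcedure and closes each region of the table,
        // which is what triggers the final flush seen above.
        admin.disableTable(table);
      }
      System.out.println("disabled? " + admin.isTableDisabled(table));
    }
  }
}

The MasterRpcServices "Checking to see if procedure is done pid=106" entries interleaved through this section are the client polling for exactly that completion.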
2024-12-03T15:22:21,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8] to archive 2024-12-03T15:22:21,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
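The per-file moves that follow relocate each compacted store file from the region's data directory to the same relative path under archive/. The hypothetical helper below sketches that mapping as it appears in the log; it is an illustration of the observed path pattern, not HBase's HFileArchiver implementation.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  /** Hypothetical helper: map <root>/data/<rest> to <root>/archive/data/<rest>. */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String prefix = rootDir.toString() + "/data/";
    String file = storeFile.toString();
    if (!file.startsWith(prefix)) {
      throw new IllegalArgumentException("not under " + prefix + ": " + file);
    }
    return new Path(rootDir, "archive/data/" + file.substring(prefix.length()));
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d");
    // Prints the archive location the HFileArchiver lines below report for this file.
    System.out.println(toArchivePath(root, storeFile));
  }
}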
2024-12-03T15:22:21,728 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/09823856e8b24069abab0f448027b05d 2024-12-03T15:22:21,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ae6d6e0a1cc3422f9e5c2cdcb019411d 2024-12-03T15:22:21,729 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/30353f92d91641f58b6021904dc2f9a0 2024-12-03T15:22:21,730 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c3e3bf98f8e411b8d256f910df1c759 2024-12-03T15:22:21,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5d84206308454979a3e96ea4e5cef65d 2024-12-03T15:22:21,732 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/50d28f3cc7d04838abdff5d51e5609e5 2024-12-03T15:22:21,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ff6fdb683b474665bccb5b267c1b0981 2024-12-03T15:22:21,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/987a37d0df514d2bb2aaffbcddb279f7 2024-12-03T15:22:21,734 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3744e4cc122e4657a1b070f8ef1fb9fa 2024-12-03T15:22:21,735 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2c46f435856346f0bb2af6ef94b4f50d 2024-12-03T15:22:21,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6da5ece3e94741899051ebea5c361486 2024-12-03T15:22:21,737 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2a90ee42ecc749d09cb28f96bc6698b5 2024-12-03T15:22:21,738 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/3b9aeb6c3b4b48d8a75cfb3b9e86c6a1 2024-12-03T15:22:21,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/4f7f38663cab4a3b85165438a13cd39c 2024-12-03T15:22:21,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/6ddeff555fa84571a5c948437b4eceb4 2024-12-03T15:22:21,740 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/610f5ed736e5470db8bac1ebb91e6a2e 2024-12-03T15:22:21,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/404d1cc9893e478f9cf6e930194be5fd 2024-12-03T15:22:21,742 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fc2247c01abd47e498e044977f93596c 2024-12-03T15:22:21,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/fb8c8fc4a9884a0e84511a9dced7821e 2024-12-03T15:22:21,744 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/bbbe36ffadc140f2a96d184b1791ffaf 2024-12-03T15:22:21,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/2289c767b13f49b89717a175c7f85204 2024-12-03T15:22:21,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5320f46382644856956387d1ece44bd0 2024-12-03T15:22:21,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/1e02bed791574505962cc63e03a185b5 2024-12-03T15:22:21,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-03T15:22:21,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/5b1b833ca4f7490b90faedd6ea85c478 2024-12-03T15:22:21,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/73a87fca83d74781824e212c013b087f 2024-12-03T15:22:21,750 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/55160d2a5a674bf6913190d44a06a56b 2024-12-03T15:22:21,751 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ef9ab8f29fe6418ca1e15d640b4ab3c8 2024-12-03T15:22:21,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/fd21a49038df476cbfd04a4eb2727e80, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b62f7c06d3dc4c1083a3e0d97d123f09, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/03d98b94a7544c808fb7b397d95db10f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f20548bcf15d45cd893a7c3565999e7e, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c270d3d12cd1446893093f4f7945b021, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/53e3d9d1f6e94e7ca6d52060944fdd6e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/edc1b8889b1e45cd86ed3aff09d5f38b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b129afb232754f0f87336c411945e858, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8] to archive 2024-12-03T15:22:21,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:21,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/7541ce23b4ec44779838a428c4d2c163 2024-12-03T15:22:21,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/49a0822c0b3547fe848353024e38b9c4 2024-12-03T15:22:21,755 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/fd21a49038df476cbfd04a4eb2727e80 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/fd21a49038df476cbfd04a4eb2727e80 2024-12-03T15:22:21,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/5c3296914f2d4f0bb2e3bb329833086d 2024-12-03T15:22:21,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/890a48df60ae4a9393d5581d3c2ac2de 2024-12-03T15:22:21,757 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b62f7c06d3dc4c1083a3e0d97d123f09 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b62f7c06d3dc4c1083a3e0d97d123f09 2024-12-03T15:22:21,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/302c5a329c0041479b825c2f3df2bb51 2024-12-03T15:22:21,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/264d7f436da84526930e80a0e15d5c81 2024-12-03T15:22:21,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/03d98b94a7544c808fb7b397d95db10f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/03d98b94a7544c808fb7b397d95db10f 2024-12-03T15:22:21,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/34d7b225a1e54504afd1a686640a2433 2024-12-03T15:22:21,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3de8578b3bb740b88482aefebcfccdef 2024-12-03T15:22:21,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f20548bcf15d45cd893a7c3565999e7e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f20548bcf15d45cd893a7c3565999e7e 2024-12-03T15:22:21,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/f2f876cbc2ee4fa99c6ede28e2988eed 2024-12-03T15:22:21,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/887b7c067b31493db48cb98a91fcc080 2024-12-03T15:22:21,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c270d3d12cd1446893093f4f7945b021 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c270d3d12cd1446893093f4f7945b021 2024-12-03T15:22:21,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/788ebe9a0b2d43eca0c882a0d2ea2473 2024-12-03T15:22:21,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/ec014c582a3b4be2994551366068fb32 2024-12-03T15:22:21,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/53e3d9d1f6e94e7ca6d52060944fdd6e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/53e3d9d1f6e94e7ca6d52060944fdd6e 2024-12-03T15:22:21,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/421e254ee4634c1c8b0dfe82eae853ce 2024-12-03T15:22:21,770 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/01bb4a9e60d2423bb4fd52106c8d8ae5 2024-12-03T15:22:21,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/edc1b8889b1e45cd86ed3aff09d5f38b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/edc1b8889b1e45cd86ed3aff09d5f38b 2024-12-03T15:22:21,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/3a6d0400b93c4420814e1031f8a1a061 2024-12-03T15:22:21,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/c385a42a9f9742fe8c2ef3123458dc6d 2024-12-03T15:22:21,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b129afb232754f0f87336c411945e858 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/b129afb232754f0f87336c411945e858 2024-12-03T15:22:21,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/62c90f9072c74638b7a99d84b60d2ab3 2024-12-03T15:22:21,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/e276f91dd24d45839a9e79acbf4a7068 2024-12-03T15:22:21,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/91683272b18b4f74b9f95b0298e6b5b8 2024-12-03T15:22:21,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/65f6557b05324fcb92a03f4ab9e8feef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45e79b169f764f01b8fc25124e244af5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8065bb78dc7e4dad8250331e043f6e4d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/178430428070433286ee1ecc428784f5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8a1d0fbb45714e3c979b2a32a8450b31, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/dfb9ef32b4ff4809a2fac1e19aa7be2e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/032ceb9448b3408b8145add93b1e2435, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0a8d0a0386d34c1785bd56ae0f9455cf, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36] to archive 2024-12-03T15:22:21,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:21,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/481a44ed66c844aebfb9ebea464a1237 2024-12-03T15:22:21,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8faa1ccd175c46f7a15ba265aa391734 2024-12-03T15:22:21,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/65f6557b05324fcb92a03f4ab9e8feef to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/65f6557b05324fcb92a03f4ab9e8feef 2024-12-03T15:22:21,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/42d7ac96ca6a4ef48588854efb84d1ae 2024-12-03T15:22:21,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/ae3136e25f5141ffbdcb37847163f47e 2024-12-03T15:22:21,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45e79b169f764f01b8fc25124e244af5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45e79b169f764f01b8fc25124e244af5 2024-12-03T15:22:21,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/9bfd486575464cea8ebe879551e68a68 2024-12-03T15:22:21,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8fd2f92dfa6744bc8882f181d324cd98 2024-12-03T15:22:21,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8065bb78dc7e4dad8250331e043f6e4d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8065bb78dc7e4dad8250331e043f6e4d 2024-12-03T15:22:21,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d2bd850eb8940ec82beef04798408ea 2024-12-03T15:22:21,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/808f9edb39cf4b7a947d0673338b55c2 2024-12-03T15:22:21,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/178430428070433286ee1ecc428784f5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/178430428070433286ee1ecc428784f5 2024-12-03T15:22:21,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/43c943f56ce0496fb50d4c08275fc1ac 2024-12-03T15:22:21,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/54c1b794af1341359d1e2efb9a322804 2024-12-03T15:22:21,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8a1d0fbb45714e3c979b2a32a8450b31 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/8a1d0fbb45714e3c979b2a32a8450b31 2024-12-03T15:22:21,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/934b7e89bd184c99beb8262c1aa1033d 2024-12-03T15:22:21,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/5cb02d515bf249d58aca6354884fa24e 2024-12-03T15:22:21,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/dfb9ef32b4ff4809a2fac1e19aa7be2e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/dfb9ef32b4ff4809a2fac1e19aa7be2e 2024-12-03T15:22:21,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/162d7a307c724445a9c87c9c67bd78be 2024-12-03T15:22:21,807 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/6588755b8b8e416da7409647feeb0e4a 2024-12-03T15:22:21,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/032ceb9448b3408b8145add93b1e2435 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/032ceb9448b3408b8145add93b1e2435 2024-12-03T15:22:21,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/45a6f67af1884f1eb91c756b0151154b 2024-12-03T15:22:21,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/eefa2cfdd01942cbaf481a3bcbce1d6a 2024-12-03T15:22:21,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0a8d0a0386d34c1785bd56ae0f9455cf to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0a8d0a0386d34c1785bd56ae0f9455cf 2024-12-03T15:22:21,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/921334b55e324d40a35f1cc4c5eeee64 2024-12-03T15:22:21,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/0de55cfca3b14319932396975ab0c4af 2024-12-03T15:22:21,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/1ea843ff611e45e999a1abd242ebdc36 2024-12-03T15:22:21,818 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits/410.seqid, newMaxSeqId=410, maxSeqId=4 2024-12-03T15:22:21,818 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083. 2024-12-03T15:22:21,818 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] regionserver.HRegion(1635): Region close journal for a2e5b6b6d57ac0725cc77df907fce083: 2024-12-03T15:22:21,820 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=109}] handler.UnassignRegionHandler(170): Closed a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:21,820 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=108 updating hbase:meta row=a2e5b6b6d57ac0725cc77df907fce083, regionState=CLOSED 2024-12-03T15:22:21,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-03T15:22:21,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; CloseRegionProcedure a2e5b6b6d57ac0725cc77df907fce083, server=2b5ef621a0dd,46815,1733239226292 in 369 msec 2024-12-03T15:22:21,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=108, resume processing ppid=107 2024-12-03T15:22:21,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=a2e5b6b6d57ac0725cc77df907fce083, UNASSIGN in 372 msec 2024-12-03T15:22:21,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-03T15:22:21,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 376 msec 2024-12-03T15:22:21,826 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239341826"}]},"ts":"1733239341826"} 2024-12-03T15:22:21,827 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-03T15:22:21,829 INFO 
[PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:22:21,831 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 392 msec 2024-12-03T15:22:22,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-03T15:22:22,049 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-03T15:22:22,049 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:22:22,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,051 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=110, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-03T15:22:22,051 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=110, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,054 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,056 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits] 2024-12-03T15:22:22,058 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ab8ddefadf71414f90c928f5a10200ad to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/ab8ddefadf71414f90c928f5a10200ad 2024-12-03T15:22:22,059 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/d7186173fec14218a92126ec311a0747 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/d7186173fec14218a92126ec311a0747 2024-12-03T15:22:22,060 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/dca8f6291b3e422aaafc7229f3e2b187 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/A/dca8f6291b3e422aaafc7229f3e2b187 2024-12-03T15:22:22,062 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/1be784842a944581a1fbe01d7bc04ba3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/1be784842a944581a1fbe01d7bc04ba3 2024-12-03T15:22:22,063 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/2f6f692183e44793abf2f84154fce102 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/2f6f692183e44793abf2f84154fce102 2024-12-03T15:22:22,064 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/a7d40ddd9cb1464ca2f5c4097a8ad3de to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/B/a7d40ddd9cb1464ca2f5c4097a8ad3de 2024-12-03T15:22:22,071 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d5a2b76943e4b23bcc1787f264fec86 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/2d5a2b76943e4b23bcc1787f264fec86 2024-12-03T15:22:22,072 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/405459c22aa6424292eca19cabc1dd76 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/405459c22aa6424292eca19cabc1dd76 2024-12-03T15:22:22,073 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/a265bc7266934a81a016c9a458e88253 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/C/a265bc7266934a81a016c9a458e88253 2024-12-03T15:22:22,076 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits/410.seqid to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083/recovered.edits/410.seqid 2024-12-03T15:22:22,077 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,077 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:22:22,077 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:22:22,078 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-03T15:22:22,082 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120320b9dd4a54f94b318919408522e328fe_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120320b9dd4a54f94b318919408522e328fe_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,083 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120324444d06222146469fc9300b02a6a889_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120324444d06222146469fc9300b02a6a889_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,088 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032d069c51decd4c55b615e830c00850cd_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032d069c51decd4c55b615e830c00850cd_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,089 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032ee2d34868c8485f8326f04e8a2a0778_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412032ee2d34868c8485f8326f04e8a2a0778_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,090 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033333e333795d49e9b6e3fbf2070c859f_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033333e333795d49e9b6e3fbf2070c859f_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,091 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033dd00f1e8309433096bafc6f02bffdd6_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033dd00f1e8309433096bafc6f02bffdd6_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,092 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033e958b3cc2924cad9a399e69e58c863a_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412033e958b3cc2924cad9a399e69e58c863a_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,101 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034200fe34ce00494799b8252d7a60265b_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412034200fe34ce00494799b8252d7a60265b_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,102 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203544718f537c1423a8e9f09333d253e47_a2e5b6b6d57ac0725cc77df907fce083 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203544718f537c1423a8e9f09333d253e47_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,103 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120354c25e54f4664a7a8418125b1ff60788_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120354c25e54f4664a7a8418125b1ff60788_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,104 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120355440996f16747fb9ff2bb7936fefbae_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120355440996f16747fb9ff2bb7936fefbae_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,105 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035ae8ca463e064a8b8541cdc068953012_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412035ae8ca463e064a8b8541cdc068953012_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,106 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120377483a96342245f4b7ae554f4ea164d2_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120377483a96342245f4b7ae554f4ea164d2_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,108 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412039ce32fe20a0547c0b4f8f77ca9ea9f29_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412039ce32fe20a0547c0b4f8f77ca9ea9f29_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,109 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c192c51a117f439f95e1572b8416a8b1_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c192c51a117f439f95e1572b8416a8b1_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,110 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c43c99eb91a64273b4a5ca3f222a6f43_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c43c99eb91a64273b4a5ca3f222a6f43_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,111 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf688dc70158408381f6a086e1141dd2_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf688dc70158408381f6a086e1141dd2_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,112 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf87486ef3d14da98e7159ae8b5304aa_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203cf87486ef3d14da98e7159ae8b5304aa_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,113 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f20d71d808224421b93c19276a001c10_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f20d71d808224421b93c19276a001c10_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,114 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fb06022673fb495b9ab3fb3864715522_a2e5b6b6d57ac0725cc77df907fce083 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fb06022673fb495b9ab3fb3864715522_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,115 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fff823e65de94cc1908c524f1786263d_a2e5b6b6d57ac0725cc77df907fce083 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203fff823e65de94cc1908c524f1786263d_a2e5b6b6d57ac0725cc77df907fce083 2024-12-03T15:22:22,115 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:22:22,117 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=110, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,118 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:22:22,121 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:22:22,121 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=110, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,121 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-03T15:22:22,122 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239342122"}]},"ts":"9223372036854775807"} 2024-12-03T15:22:22,123 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:22:22,123 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => a2e5b6b6d57ac0725cc77df907fce083, NAME => 'TestAcidGuarantees,,1733239318956.a2e5b6b6d57ac0725cc77df907fce083.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:22:22,123 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
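For context on the sequence traced above: the DisableTableProcedure (pid=106) and DeleteTableProcedure (pid=110) are driven by ordinary client-side Admin calls, and the repeated "Checking to see if procedure is done pid=..." lines are the client polling those procedures until the master reports completion. A minimal, hypothetical client sketch; only the table name is taken from the log, the class name and connection setup are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // A table must be disabled before it can be deleted; each call blocks
      // until the corresponding master procedure (DisableTableProcedure /
      // DeleteTableProcedure) finishes.
      if (admin.tableExists(tn)) {
        admin.disableTable(tn);
        admin.deleteTable(tn);
      }
    }
  }
}
```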
2024-12-03T15:22:22,123 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239342123"}]},"ts":"9223372036854775807"} 2024-12-03T15:22:22,124 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:22:22,127 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=110, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 78 msec 2024-12-03T15:22:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-03T15:22:22,152 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-03T15:22:22,165 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 237) - Thread LEAK? -, OpenFileDescriptor=461 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=894 (was 915), ProcessCount=11 (was 11), AvailableMemoryMB=1004 (was 1445) 2024-12-03T15:22:22,177 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=894, ProcessCount=11, AvailableMemoryMB=1004 2024-12-03T15:22:22,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
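Regarding the TableDescriptorChecker warning just above: MEMSTORE_FLUSHSIZE=131072 (128 KB) is deliberately tiny for this test, well below the 128 MB default of hbase.hregion.memstore.flush.size, which is why the checker warns about very frequent flushing. A hedged sketch of how a descriptor ends up with such a value; the 131072 figure is taken from the log, the class name and builder usage are illustrative:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class TinyFlushSizeSketch {
  static TableDescriptor build() {
    // Setting a 128 KB per-region flush size on the descriptor is what trips
    // the TableDescriptorChecker warning at table-create time.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(131072L)               // 128 KB; cluster default is 128 MB
        // .setValue("MEMSTORE_FLUSHSIZE", "131072") // equivalent raw-metadata form
        .build();
  }
}
```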
2024-12-03T15:22:22,178 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:22:22,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=111, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:22,180 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:22:22,180 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:22,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 111 2024-12-03T15:22:22,181 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:22:22,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-03T15:22:22,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742292_1468 (size=963) 2024-12-03T15:22:22,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-03T15:22:22,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-03T15:22:22,588 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:22:22,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742293_1469 (size=53) 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6c345cf4429e47e0b5ec5adba6afb04a, disabling compactions & flushes 2024-12-03T15:22:22,599 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. after waiting 0 ms 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,599 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,599 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:22,600 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:22:22,600 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239342600"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239342600"}]},"ts":"1733239342600"} 2024-12-03T15:22:22,601 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
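The descriptor that CreateTableProcedure (pid=111) has just written to the filesystem and to hbase:meta is the one printed by HMaster at 15:22:22,178: families A, B and C with VERSIONS => '1' and the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A minimal sketch that would build an equivalent descriptor; class and method names are illustrative and an open Admin is assumed:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTableSketch {
  static void create(Admin admin) throws IOException {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the log; selects the ADAPTIVE
        // CompactingMemStore for every store of the table.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)          // VERSIONS => '1' in the logged descriptor
          .build());
    }
    admin.createTable(tdb.build());   // blocks until CreateTableProcedure completes
  }
}
```

The remaining per-family options shown in the log (BLOOMFILTER, BLOCKSIZE, TTL, and so on) match the builder defaults and are therefore omitted from the sketch.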
2024-12-03T15:22:22,602 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:22:22,602 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239342602"}]},"ts":"1733239342602"} 2024-12-03T15:22:22,603 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:22:22,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, ASSIGN}] 2024-12-03T15:22:22,609 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, ASSIGN 2024-12-03T15:22:22,609 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=111, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:22:22,760 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=6c345cf4429e47e0b5ec5adba6afb04a, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:22,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure 6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:22,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-03T15:22:22,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:22,916 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
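Once the ASSIGN TransitRegionStateProcedure above publishes regionState=OPENING (and later OPEN) with regionLocation=2b5ef621a0dd,46815,1733239226292 to hbase:meta, clients normally resolve the region through the RegionLocator API rather than reading meta rows directly. An illustrative sketch; the method and class names are hypothetical and an open Connection is assumed:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class RegionLocationSketch {
  static void printLocations(Connection conn) throws IOException {
    try (RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name and hosting server, as recorded in hbase:meta
        // by the assignment procedures above.
        System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
      }
    }
  }
}
```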
2024-12-03T15:22:22,916 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:22:22,917 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,917 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:22,917 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,917 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,918 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,922 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:22,922 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c345cf4429e47e0b5ec5adba6afb04a columnFamilyName A 2024-12-03T15:22:22,922 DEBUG [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:22,930 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(327): Store=6c345cf4429e47e0b5ec5adba6afb04a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:22,930 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,941 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:22,941 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c345cf4429e47e0b5ec5adba6afb04a columnFamilyName B 2024-12-03T15:22:22,941 DEBUG [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:22,946 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(327): Store=6c345cf4429e47e0b5ec5adba6afb04a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:22,946 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,949 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:22,949 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c345cf4429e47e0b5ec5adba6afb04a columnFamilyName C 2024-12-03T15:22:22,949 DEBUG [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:22,950 INFO [StoreOpener-6c345cf4429e47e0b5ec5adba6afb04a-1 {}] regionserver.HStore(327): Store=6c345cf4429e47e0b5ec5adba6afb04a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:22,950 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,951 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,951 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,953 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:22:22,956 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:22,970 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:22:22,974 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened 6c345cf4429e47e0b5ec5adba6afb04a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70439078, jitterRate=0.04962405562400818}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:22:22,975 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:22,979 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., pid=113, masterSystemTime=1733239342912 2024-12-03T15:22:22,988 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:22,988 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
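The CompactionConfiguration lines printed for stores A, B and C above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter) are the stock values. As a rough guide only, they typically correspond to the configuration keys sketched below; the numbers are merely restated from the log, not tuning advice, and the class name is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionDefaultsSketch {
  static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    return conf;
  }
}
```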
2024-12-03T15:22:22,998 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=6c345cf4429e47e0b5ec5adba6afb04a, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-03T15:22:23,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure 6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 in 238 msec 2024-12-03T15:22:23,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-12-03T15:22:23,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, ASSIGN in 395 msec 2024-12-03T15:22:23,008 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:22:23,008 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239343008"}]},"ts":"1733239343008"} 2024-12-03T15:22:23,009 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:22:23,030 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=111, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:22:23,036 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 852 msec 2024-12-03T15:22:23,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-03T15:22:23,286 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 111 completed 2024-12-03T15:22:23,288 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-12-03T15:22:23,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,318 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,320 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60946, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,330 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:22:23,338 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52860, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:22:23,345 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-12-03T15:22:23,382 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,383 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-03T15:22:23,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,408 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-03T15:22:23,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-03T15:22:23,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,423 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-12-03T15:22:23,430 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,431 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-12-03T15:22:23,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,444 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-12-03T15:22:23,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,451 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-03T15:22:23,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,458 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-12-03T15:22:23,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,464 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d49886 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73d92042 2024-12-03T15:22:23,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c692575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:23,482 DEBUG [hconnection-0x11262c63-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,483 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,486 DEBUG [hconnection-0xdb10cb3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,487 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,493 DEBUG [hconnection-0x20fb0dfe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,493 DEBUG 
[hconnection-0x5ccc5fd4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,494 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,494 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,495 DEBUG [hconnection-0x35b4cd7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,495 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:23,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:23,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:23,502 DEBUG [hconnection-0x2aeb0800-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,503 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32778, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,504 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:23,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-03T15:22:23,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-03T15:22:23,511 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:23,511 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:23,512 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:23,514 DEBUG [hconnection-0x536c23fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239403517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239403518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,519 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32788, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239403519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239403519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,520 DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,522 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,526 DEBUG [hconnection-0x6f6059ab-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239403526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,527 DEBUG [hconnection-0x58a5ec77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:23,527 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,528 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:23,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/128fcc5a2e65456f96c1da1cc9fc30a5 is 50, key is test_row_0/A:col10/1733239343491/Put/seqid=0 2024-12-03T15:22:23,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742294_1470 (size=12001) 2024-12-03T15:22:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-03T15:22:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239403619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239403619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239403621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239403621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239403628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:23,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:23,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:23,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:23,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-03T15:22:23,817 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:23,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239403824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239403824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239403824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239403825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:23,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239403830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/128fcc5a2e65456f96c1da1cc9fc30a5 2024-12-03T15:22:23,970 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:23,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:23,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:23,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:23,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
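The repeated WARN/DEBUG pairs above show puts being rejected with RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K in this run, presumably tuned very low by the test) while the MemStoreFlusher flush is still writing its snapshot out (the DefaultStoreFlusher line shows 17.89 KB landing under .tmp/A). The stock HBase client generally retries such calls itself until the operation timeout (the deadline field in the CallRunner lines); the loop below is only an illustrative sketch, assuming the HBase 2.x client API, of what an explicit bounded retry around Table.put would look like. The class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetry {
  // Retries a put a few times with linear backoff when the region reports it
  // is over its memstore blocking limit, as in the WARN lines above.
  static void putWithRetry(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(100L * attempt); // give the in-flight flush time to drain the memstore
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // Row/column layout echoes the key visible in the HFile writer line
    // (test_row_0/A:col10); the value is made up.
    Put put = new Put(Bytes.toBytes("test_row_0"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
    // putWithRetry(table, put, 5);  // wiring of 'table' is omitted in this sketch
  }
}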
2024-12-03T15:22:23,971 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:23,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1d2cea0bd3d74766b430a24ff37c0321 is 50, key is test_row_0/B:col10/1733239343491/Put/seqid=0
2024-12-03T15:22:24,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742295_1471 (size=12001)
2024-12-03T15:22:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-03T15:22:24,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-03T15:22:24,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing
2024-12-03T15:22:24,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239404128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239404129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239404129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239404131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239404142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,277 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-03T15:22:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing
2024-12-03T15:22:24,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1d2cea0bd3d74766b430a24ff37c0321
2024-12-03T15:22:24,433 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-03T15:22:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing
2024-12-03T15:22:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/9a6df1e689204a6088479e02f899d4ca is 50, key is test_row_0/C:col10/1733239343491/Put/seqid=0
2024-12-03T15:22:24,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742296_1472 (size=12001)
2024-12-03T15:22:24,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/9a6df1e689204a6088479e02f899d4ca
2024-12-03T15:22:24,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/128fcc5a2e65456f96c1da1cc9fc30a5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5
2024-12-03T15:22:24,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5, entries=150, sequenceid=12, filesize=11.7 K
2024-12-03T15:22:24,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1d2cea0bd3d74766b430a24ff37c0321 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321
2024-12-03T15:22:24,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321, entries=150, sequenceid=12, filesize=11.7 K
2024-12-03T15:22:24,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-03T15:22:24,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing
2024-12-03T15:22:24,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/9a6df1e689204a6088479e02f899d4ca as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca
2024-12-03T15:22:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-03T15:22:24,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca, entries=150, sequenceid=12, filesize=11.7 K
2024-12-03T15:22:24,633 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6c345cf4429e47e0b5ec5adba6afb04a in 1135ms, sequenceid=12, compaction requested=false
2024-12-03T15:22:24,634 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-12-03T15:22:24,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a:
2024-12-03T15:22:24,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-12-03T15:22:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a
2024-12-03T15:22:24,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A
2024-12-03T15:22:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B
2024-12-03T15:22:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C
2024-12-03T15:22:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:24,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/41b609fb54c34eafb51b184358a35ed2 is 50, key is test_row_0/A:col10/1733239344645/Put/seqid=0
2024-12-03T15:22:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239404656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239404658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239404659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239404659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239404660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-03T15:22:24,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742297_1473 (size=12001)
2024-12-03T15:22:24,761 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:24,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-12-03T15:22:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing
2024-12-03T15:22:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.
2024-12-03T15:22:24,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:24,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239404766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239404766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239404766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239404770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239404772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,914 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:24,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:24,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:24,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:24,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239404971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239404970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239404970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239404975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:24,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:24,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239404975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,067 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:25,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:25,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:25,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:25,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
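The stretch above is a retry loop: the master re-dispatches flush procedure pid=115, the region server finds the region already flushing, FlushRegionCallable fails with "Unable to complete flush", and the master logs "Remote procedure failed" before dispatching it again. A minimal sketch of that server-side decision follows; the types and method names are simplified assumptions for illustration, not the actual FlushRegionCallable source.

import java.io.IOException;

// Sketch only: if a flush is already in progress, fail the remote procedure so the
// master retries it later, which is the repeating pattern in the entries above.
final class FlushRetrySketch {
  enum FlushOutcome { FLUSHED, CANNOT_FLUSH }       // assumed, simplified result type

  interface Region {                                // hypothetical minimal region view
    FlushOutcome requestFlush();
    String describe();
  }

  static void runFlushProcedure(Region region) throws IOException {
    FlushOutcome outcome = region.requestFlush();   // logged as "NOT flushing ... as already flushing"
    if (outcome == FlushOutcome.CANNOT_FLUSH) {
      // Surfaces as "java.io.IOException: Unable to complete flush {ENCODED => ...}"
      throw new IOException("Unable to complete flush " + region.describe());
    }
  }
}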
2024-12-03T15:22:25,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/41b609fb54c34eafb51b184358a35ed2 2024-12-03T15:22:25,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e109f938ba3a4ace8a746d9ca08e12a8 is 50, key is test_row_0/B:col10/1733239344645/Put/seqid=0 2024-12-03T15:22:25,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742298_1474 (size=12001) 2024-12-03T15:22:25,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:25,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239405275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,277 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239405276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239405277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239405281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239405284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,373 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
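The RegionTooBusyException warnings and the matching CallRunner rejections above show client Mutate calls being turned away while the region's memstore is over its blocking limit (512.0 K in this test) and the in-flight flush has not yet drained it. Below is an illustrative client-side retry-with-backoff loop for that situation; it is a sketch, not the HBase client's built-in retry logic, though the Table and Put types are the standard org.apache.hadoop.hbase.client API.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Sketch: back off and retry a put while the region reports it is over the memstore limit.
final class TooBusyRetrySketch {
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long backoffMs = 100L;                       // assumed starting backoff
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);                          // may fail while the memstore is over the limit
        return;
      } catch (RegionTooBusyException busy) {
        Thread.sleep(backoffMs);                 // give MemStoreFlusher time to free space
        backoffMs *= 2;
      }
    }
    throw new IOException("region still too busy after retries");
  }
}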
2024-12-03T15:22:25,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e109f938ba3a4ace8a746d9ca08e12a8 2024-12-03T15:22:25,525 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:25,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:25,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:25,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
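The entries that follow show the flush completing: each column family's data was first written to a file under the region's .tmp directory (the DefaultStoreFlusher lines earlier) and is now committed into the A, B and C store directories before "Finished flush of dataSize ~167.72 KB" is reported. A minimal sketch of that write-to-temp-then-rename pattern with the Hadoop FileSystem API is below; the helper and its paths are placeholders for illustration, not the HRegionFileSystem implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: commit a flushed file from the region's .tmp directory into its store directory.
final class FlushCommitSketch {
  static Path commitFlushedFile(Configuration conf, Path tmpFile, Path storeDir) throws IOException {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dest = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {             // move the finished HFile out of .tmp
      throw new IOException("failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }
}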
2024-12-03T15:22:25,548 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f1f1950688ec41079bed7d96be6dd239 is 50, key is test_row_0/C:col10/1733239344645/Put/seqid=0 2024-12-03T15:22:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742299_1475 (size=12001) 2024-12-03T15:22:25,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f1f1950688ec41079bed7d96be6dd239 2024-12-03T15:22:25,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/41b609fb54c34eafb51b184358a35ed2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2 2024-12-03T15:22:25,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2, entries=150, sequenceid=40, filesize=11.7 K 2024-12-03T15:22:25,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e109f938ba3a4ace8a746d9ca08e12a8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8 2024-12-03T15:22:25,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8, entries=150, sequenceid=40, filesize=11.7 K 2024-12-03T15:22:25,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f1f1950688ec41079bed7d96be6dd239 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239 2024-12-03T15:22:25,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239, entries=150, sequenceid=40, filesize=11.7 K 2024-12-03T15:22:25,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6c345cf4429e47e0b5ec5adba6afb04a in 940ms, sequenceid=40, compaction 
requested=false 2024-12-03T15:22:25,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-03T15:22:25,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:25,679 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:25,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:25,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/48a67299d5cf48b7b20156989fafb3ef is 50, key is test_row_0/A:col10/1733239344659/Put/seqid=0 2024-12-03T15:22:25,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742300_1476 (size=9657) 2024-12-03T15:22:25,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:25,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:25,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239405814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239405815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239405819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239405821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239405822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239405923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239405927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239405933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239405933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:25,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:25,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239405934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,122 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/48a67299d5cf48b7b20156989fafb3ef 2024-12-03T15:22:26,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239406130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/d221ba9bb79b420f820425d2a50021d5 is 50, key is test_row_0/B:col10/1733239344659/Put/seqid=0 2024-12-03T15:22:26,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239406139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239406142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239406143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239406145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742301_1477 (size=9657) 2024-12-03T15:22:26,159 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/d221ba9bb79b420f820425d2a50021d5 2024-12-03T15:22:26,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3e38439bf3994ba58868a2c4bcbc6abe is 50, key is test_row_0/C:col10/1733239344659/Put/seqid=0 2024-12-03T15:22:26,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742302_1478 (size=9657) 2024-12-03T15:22:26,204 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3e38439bf3994ba58868a2c4bcbc6abe 2024-12-03T15:22:26,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/48a67299d5cf48b7b20156989fafb3ef as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef 2024-12-03T15:22:26,235 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef, entries=100, sequenceid=48, filesize=9.4 K 2024-12-03T15:22:26,238 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/d221ba9bb79b420f820425d2a50021d5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5 2024-12-03T15:22:26,247 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5, entries=100, sequenceid=48, filesize=9.4 K 2024-12-03T15:22:26,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3e38439bf3994ba58868a2c4bcbc6abe as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe 2024-12-03T15:22:26,264 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe, entries=100, sequenceid=48, filesize=9.4 K 2024-12-03T15:22:26,265 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 6c345cf4429e47e0b5ec5adba6afb04a in 586ms, sequenceid=48, compaction requested=true 2024-12-03T15:22:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
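[Editor's note, not part of the captured log] The RegionTooBusyException warnings repeated above ("Over memstore limit=512.0 K") are raised by HRegion.checkResources when a region's memstore grows past its blocking limit, which is the per-region memstore flush size multiplied by the block multiplier; writes are rejected until the in-flight flush catches up. The sketch below only illustrates how a 512 K limit can arise from those two settings; the concrete values are assumptions chosen to reproduce the number in the log, not values read from this test's configuration (the shipped defaults are 128 MB and 4).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed test-style settings, far below the defaults of 128 MB and 4.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush threshold (assumed)
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x that size (assumed)

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            // 128 K * 4 = 524288 bytes, i.e. the "Over memstore limit=512.0 K" reported above.
            System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
        }
    }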
2024-12-03T15:22:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-03T15:22:26,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-03T15:22:26,275 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-03T15:22:26,275 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7620 sec 2024-12-03T15:22:26,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.7720 sec 2024-12-03T15:22:26,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:26,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:26,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:26,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/7c70a2f9801647b6b88e30cb6840400b is 50, key is test_row_0/A:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:26,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742303_1479 (size=14341) 2024-12-03T15:22:26,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/7c70a2f9801647b6b88e30cb6840400b 2024-12-03T15:22:26,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/45636104b5ba412d9efdc40fbaee1dc4 is 50, key is test_row_0/B:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:26,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239406447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239406448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239406449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742304_1480 (size=12001) 2024-12-03T15:22:26,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239406458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239406458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239406559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239406565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239406566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239406765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239406773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239406773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/45636104b5ba412d9efdc40fbaee1dc4 2024-12-03T15:22:26,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/d22ec03137b94b238db087c7029551ca is 50, key is test_row_0/C:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:26,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742305_1481 (size=12001) 2024-12-03T15:22:26,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239406960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:26,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:26,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239406961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239407074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239407079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239407081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/d22ec03137b94b238db087c7029551ca 2024-12-03T15:22:27,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/7c70a2f9801647b6b88e30cb6840400b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b 2024-12-03T15:22:27,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b, entries=200, sequenceid=78, filesize=14.0 K 2024-12-03T15:22:27,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/45636104b5ba412d9efdc40fbaee1dc4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4 2024-12-03T15:22:27,291 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:22:27,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/d22ec03137b94b238db087c7029551ca as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca 2024-12-03T15:22:27,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:22:27,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for 6c345cf4429e47e0b5ec5adba6afb04a in 859ms, sequenceid=78, compaction requested=true 2024-12-03T15:22:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:27,297 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:27,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:27,298 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:27,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:27,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:27,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:27,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:27,299 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48000 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:27,299 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction 
(all files) 2024-12-03T15:22:27,299 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,299 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=46.9 K 2024-12-03T15:22:27,300 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:27,300 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:27,300 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:27,300 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=44.6 K 2024-12-03T15:22:27,300 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 128fcc5a2e65456f96c1da1cc9fc30a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733239343491 2024-12-03T15:22:27,301 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d2cea0bd3d74766b430a24ff37c0321, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733239343491 2024-12-03T15:22:27,301 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41b609fb54c34eafb51b184358a35ed2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239343515 2024-12-03T15:22:27,301 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e109f938ba3a4ace8a746d9ca08e12a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239343515 2024-12-03T15:22:27,301 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48a67299d5cf48b7b20156989fafb3ef, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733239344658 2024-12-03T15:22:27,302 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d221ba9bb79b420f820425d2a50021d5, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1733239344658 2024-12-03T15:22:27,302 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c70a2f9801647b6b88e30cb6840400b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345811 2024-12-03T15:22:27,303 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 45636104b5ba412d9efdc40fbaee1dc4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345817 2024-12-03T15:22:27,314 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#396 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:27,315 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/fe7e544c9eaa4c92ab8b31346a0cd373 is 50, key is test_row_0/B:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:27,320 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:27,321 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/2eaf94cd72e04880ae63c49618f6184c is 50, key is test_row_0/A:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:27,344 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-03T15:22:27,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742306_1482 (size=12139) 2024-12-03T15:22:27,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742307_1483 (size=12139) 2024-12-03T15:22:27,358 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/fe7e544c9eaa4c92ab8b31346a0cd373 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/fe7e544c9eaa4c92ab8b31346a0cd373 2024-12-03T15:22:27,363 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into fe7e544c9eaa4c92ab8b31346a0cd373(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
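The "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" and ExploringCompactionPolicy lines above come from the store-file selection step of the default compaction policy. As a rough orientation only, a minimal Java sketch of the settings behind those figures; the values shown are the usual defaults and are assumptions, not settings read from this test run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch: the keys are standard HBase settings, the values are
    // the usual defaults, not this test's actual configuration.
    public class CompactionKnobsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is
        // selected (the selection above fires with 4 eligible files).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Size ratio used when deciding which files may be compacted together.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Store-file count at which further flushes are blocked; this is the
        // "16 blocking" figure in the selection lines above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      }
    }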
2024-12-03T15:22:27,363 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:27,363 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=12, startTime=1733239347298; duration=0sec 2024-12-03T15:22:27,363 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:27,363 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:27,363 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:27,364 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45660 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:27,364 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:27,364 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,364 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=44.6 K 2024-12-03T15:22:27,365 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a6df1e689204a6088479e02f899d4ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733239343491 2024-12-03T15:22:27,365 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f1f1950688ec41079bed7d96be6dd239, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733239343515 2024-12-03T15:22:27,366 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e38439bf3994ba58868a2c4bcbc6abe, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, 
compression=NONE, seqNum=48, earliestPutTs=1733239344658 2024-12-03T15:22:27,366 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d22ec03137b94b238db087c7029551ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345817 2024-12-03T15:22:27,380 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#398 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:27,380 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/37af28ae854a4b58a3c3304ffd10b3c5 is 50, key is test_row_0/C:col10/1733239345817/Put/seqid=0 2024-12-03T15:22:27,407 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/2eaf94cd72e04880ae63c49618f6184c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2eaf94cd72e04880ae63c49618f6184c 2024-12-03T15:22:27,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742308_1484 (size=12139) 2024-12-03T15:22:27,424 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into 2eaf94cd72e04880ae63c49618f6184c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
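The RegionTooBusyException warnings that dominate this log ("Over memstore limit=512.0 K") are thrown from HRegion.checkResources while the region's memstore sits above its blocking size, which is the configured flush size multiplied by the block multiplier; writes are rejected until a flush like the one above frees memory. A minimal sketch of the two settings involved, with illustrative values chosen only so that their product matches the 512 K limit printed above; they are assumptions, not the values this test actually uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative values only: 128 K x 4 = 512 K, matching the blocking limit
    // reported in the exceptions above; not read from the test's configuration.
    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a flush of the region is requested.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Puts are rejected with RegionTooBusyException once the memstore
        // exceeds flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }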
2024-12-03T15:22:27,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a:
2024-12-03T15:22:27,424 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=12, startTime=1733239347297; duration=0sec
2024-12-03T15:22:27,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:22:27,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A
2024-12-03T15:22:27,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a
2024-12-03T15:22:27,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-03T15:22:27,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A
2024-12-03T15:22:27,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:27,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B
2024-12-03T15:22:27,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:27,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C
2024-12-03T15:22:27,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:22:27,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/81880de33eff4a868b9a9b8482d0f0b0 is 50, key is test_row_0/A:col10/1733239347587/Put/seqid=0
2024-12-03T15:22:27,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742309_1485 (size=14341)
2024-12-03T15:22:27,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-03T15:22:27,621 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed
2024-12-03T15:22:27,622 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-03T15:22:27,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees
2024-12-03T15:22:27,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}]
master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:27,624 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:27,624 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:27,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:27,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239407664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239407664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239407668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:27,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-03T15:22:27,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239407770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239407770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:27,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:27,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239407771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:27,835 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/37af28ae854a4b58a3c3304ffd10b3c5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5 2024-12-03T15:22:27,846 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 37af28ae854a4b58a3c3304ffd10b3c5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:27,846 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:27,846 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=12, startTime=1733239347299; duration=0sec 2024-12-03T15:22:27,846 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:27,846 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:27,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:27,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-03T15:22:27,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:27,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:27,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:27,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:27,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:27,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239407970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239407970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239407977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239407978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:27,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:27,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239407978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/81880de33eff4a868b9a9b8482d0f0b0 2024-12-03T15:22:28,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1f6dfe7771c643d590e30c525b006603 is 50, key is test_row_0/B:col10/1733239347587/Put/seqid=0 2024-12-03T15:22:28,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742310_1486 (size=12001) 2024-12-03T15:22:28,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1f6dfe7771c643d590e30c525b006603 2024-12-03T15:22:28,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e5b578e3fc954e54ad2bdf270455851b is 50, key is test_row_0/C:col10/1733239347587/Put/seqid=0 2024-12-03T15:22:28,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742311_1487 (size=12001) 2024-12-03T15:22:28,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e5b578e3fc954e54ad2bdf270455851b 2024-12-03T15:22:28,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/81880de33eff4a868b9a9b8482d0f0b0 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0 2024-12-03T15:22:28,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0, entries=200, sequenceid=91, filesize=14.0 K 2024-12-03T15:22:28,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/1f6dfe7771c643d590e30c525b006603 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603 2024-12-03T15:22:28,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603, entries=150, sequenceid=91, filesize=11.7 K 2024-12-03T15:22:28,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e5b578e3fc954e54ad2bdf270455851b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b 2024-12-03T15:22:28,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b, entries=150, sequenceid=91, filesize=11.7 K 2024-12-03T15:22:28,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6c345cf4429e47e0b5ec5adba6afb04a in 475ms, sequenceid=91, compaction requested=false 2024-12-03T15:22:28,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:28,087 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:28,088 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:28,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:28,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3472c3fe684346edbfc479862d42746b is 50, key is test_row_0/A:col10/1733239347663/Put/seqid=0 2024-12-03T15:22:28,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742312_1488 (size=12001) 2024-12-03T15:22:28,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:28,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:28,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:28,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239408298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239408304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239408304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,379 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b] to archive 2024-12-03T15:22:28,380 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:28,381 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/128fcc5a2e65456f96c1da1cc9fc30a5 2024-12-03T15:22:28,382 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/41b609fb54c34eafb51b184358a35ed2 2024-12-03T15:22:28,383 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/48a67299d5cf48b7b20156989fafb3ef 2024-12-03T15:22:28,385 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c70a2f9801647b6b88e30cb6840400b 2024-12-03T15:22:28,386 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4] to archive 2024-12-03T15:22:28,387 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:28,388 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1d2cea0bd3d74766b430a24ff37c0321 2024-12-03T15:22:28,389 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e109f938ba3a4ace8a746d9ca08e12a8 2024-12-03T15:22:28,390 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/d221ba9bb79b420f820425d2a50021d5 2024-12-03T15:22:28,391 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/45636104b5ba412d9efdc40fbaee1dc4 2024-12-03T15:22:28,392 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca] to archive 2024-12-03T15:22:28,393 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:28,395 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/9a6df1e689204a6088479e02f899d4ca 2024-12-03T15:22:28,397 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f1f1950688ec41079bed7d96be6dd239 2024-12-03T15:22:28,398 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3e38439bf3994ba58868a2c4bcbc6abe 2024-12-03T15:22:28,399 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/2b5ef621a0dd:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/d22ec03137b94b238db087c7029551ca 2024-12-03T15:22:28,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239408406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239408413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239408421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,556 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3472c3fe684346edbfc479862d42746b 2024-12-03T15:22:28,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cb4e5fccd794bef8d3afafa795c32d8 is 50, key is test_row_0/B:col10/1733239347663/Put/seqid=0 2024-12-03T15:22:28,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742313_1489 (size=12001) 2024-12-03T15:22:28,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239408614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239408619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239408628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:28,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239408920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239408925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:28,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239408934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:28,966 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cb4e5fccd794bef8d3afafa795c32d8 2024-12-03T15:22:28,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/dd1b5ce89e83467981aec547c2f7c706 is 50, key is test_row_0/C:col10/1733239347663/Put/seqid=0 2024-12-03T15:22:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742314_1490 (size=12001) 2024-12-03T15:22:28,986 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/dd1b5ce89e83467981aec547c2f7c706 2024-12-03T15:22:28,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3472c3fe684346edbfc479862d42746b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b 2024-12-03T15:22:29,002 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b, entries=150, sequenceid=117, filesize=11.7 K 2024-12-03T15:22:29,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cb4e5fccd794bef8d3afafa795c32d8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8 2024-12-03T15:22:29,008 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8, entries=150, sequenceid=117, filesize=11.7 K 2024-12-03T15:22:29,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/dd1b5ce89e83467981aec547c2f7c706 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706 2024-12-03T15:22:29,016 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706, entries=150, sequenceid=117, filesize=11.7 K 2024-12-03T15:22:29,017 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6c345cf4429e47e0b5ec5adba6afb04a in 928ms, sequenceid=117, compaction requested=true 2024-12-03T15:22:29,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:29,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
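The block above is one complete region flush (pid=117): each of the three stores A, B and C is written to a .tmp HFile, committed into its store directory, and the region finishes with ~147.60 KB flushed at sequenceid=117 plus a compaction request. Flushes like this are driven from the client through the Admin API (the FlushTableProcedure/FlushRegionProcedure pairs in this log). The following is a minimal, illustrative sketch of requesting such a flush, assuming a standard client Configuration; it is not the test tool's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into a FlushTableProcedure with one
      // FlushRegionProcedure per region; in this build the client waits for
      // the procedure to finish (the "procId: ... completed" lines in this log).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}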
2024-12-03T15:22:29,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-03T15:22:29,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-03T15:22:29,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-03T15:22:29,019 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3940 sec 2024-12-03T15:22:29,022 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.3970 sec 2024-12-03T15:22:29,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:29,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:29,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:29,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/370cfa2378e14e2284b4fa8a25206c19 is 50, key is test_row_0/A:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:29,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742315_1491 (size=14391) 2024-12-03T15:22:29,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239409491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239409491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239409498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239409600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239409600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239409606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-03T15:22:29,735 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-03T15:22:29,736 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:29,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-03T15:22:29,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-03T15:22:29,738 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:29,739 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:29,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:29,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239409808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239409808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239409811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-03T15:22:29,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/370cfa2378e14e2284b4fa8a25206c19 2024-12-03T15:22:29,848 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/58d42746f8f44d1c86e608d4572cb78d is 50, key is test_row_0/B:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:29,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742316_1492 (size=12051) 2024-12-03T15:22:29,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/58d42746f8f44d1c86e608d4572cb78d 2024-12-03T15:22:29,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/300b26b2995d4a428c2223488829385b is 50, key is test_row_0/C:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:29,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742317_1493 (size=12051) 2024-12-03T15:22:29,895 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-03T15:22:29,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:29,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:29,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:29,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:29,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
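Two failure modes repeat through this stretch of the log. Writes are rejected with RegionTooBusyException whenever the region's memstore is above its blocking limit (512.0 K here), which HRegion.checkResources enforces; and the remote flush callable for pid=119 fails with "Unable to complete flush" because a flush is already in progress, so the master redispatches it later. The blocking limit is normally the per-region flush size multiplied by a block multiplier; this test clearly runs with a much smaller flush size than the defaults. A hedged configuration sketch follows, using the standard HBase keys with their shipped default values, not the test's settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes start failing with RegionTooBusyException once the memstore
    // exceeds flush.size * block.multiplier (default multiplier 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + blocking + " bytes");
  }
}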
2024-12-03T15:22:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:29,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239409973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,975 DEBUG [Thread-2091 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4160 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:29,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:29,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239409976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:29,983 DEBUG [Thread-2087 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:30,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-03T15:22:30,049 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-03T15:22:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:30,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239410118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239410119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239410121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,202 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-03T15:22:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/300b26b2995d4a428c2223488829385b 2024-12-03T15:22:30,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/370cfa2378e14e2284b4fa8a25206c19 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19 2024-12-03T15:22:30,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19, entries=200, sequenceid=129, filesize=14.1 K 2024-12-03T15:22:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-03T15:22:30,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/58d42746f8f44d1c86e608d4572cb78d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d 2024-12-03T15:22:30,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-03T15:22:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:30,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:30,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d, entries=150, sequenceid=129, filesize=11.8 K 2024-12-03T15:22:30,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/300b26b2995d4a428c2223488829385b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b 2024-12-03T15:22:30,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b, entries=150, sequenceid=129, filesize=11.8 K 2024-12-03T15:22:30,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6c345cf4429e47e0b5ec5adba6afb04a in 974ms, sequenceid=129, compaction requested=true 2024-12-03T15:22:30,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:30,406 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add 
compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:30,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-03T15:22:30,407 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:30,418 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52872 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:30,418 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:30,418 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:30,418 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2eaf94cd72e04880ae63c49618f6184c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=51.6 K 2024-12-03T15:22:30,419 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2eaf94cd72e04880ae63c49618f6184c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345817 2024-12-03T15:22:30,420 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:30,420 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 81880de33eff4a868b9a9b8482d0f0b0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733239346447 2024-12-03T15:22:30,420 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:30,420 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:30,420 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/fe7e544c9eaa4c92ab8b31346a0cd373, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=47.1 K 2024-12-03T15:22:30,420 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3472c3fe684346edbfc479862d42746b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733239347656 2024-12-03T15:22:30,420 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe7e544c9eaa4c92ab8b31346a0cd373, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345817 2024-12-03T15:22:30,421 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f6dfe7771c643d590e30c525b006603, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733239347586 2024-12-03T15:22:30,421 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 370cfa2378e14e2284b4fa8a25206c19, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:30,423 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8cb4e5fccd794bef8d3afafa795c32d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733239347656 2024-12-03T15:22:30,424 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58d42746f8f44d1c86e608d4572cb78d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:30,465 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:30,466 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/1e4f059743bd47b686d69925436b58f2 is 50, key is test_row_0/A:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:30,466 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#409 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:30,467 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/cb5e833cdd21430ca49c2cb1b020b24f is 50, key is test_row_0/B:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:30,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742318_1494 (size=12189) 2024-12-03T15:22:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742319_1495 (size=12189) 2024-12-03T15:22:30,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-03T15:22:30,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:30,521 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:30,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/d3397f3b0fd54ccb9c929da8706c0309 is 50, key is test_row_0/A:col10/1733239349490/Put/seqid=0 2024-12-03T15:22:30,554 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/1e4f059743bd47b686d69925436b58f2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1e4f059743bd47b686d69925436b58f2 2024-12-03T15:22:30,571 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into 1e4f059743bd47b686d69925436b58f2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:30,571 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:30,571 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=12, startTime=1733239350406; duration=0sec 2024-12-03T15:22:30,571 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:30,571 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:30,572 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:30,573 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:30,573 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:30,573 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:30,573 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=47.1 K 2024-12-03T15:22:30,574 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 37af28ae854a4b58a3c3304ffd10b3c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239345817 2024-12-03T15:22:30,574 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e5b578e3fc954e54ad2bdf270455851b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1733239347586 2024-12-03T15:22:30,575 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting dd1b5ce89e83467981aec547c2f7c706, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=117, earliestPutTs=1733239347656 2024-12-03T15:22:30,575 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 300b26b2995d4a428c2223488829385b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:30,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742320_1496 (size=12151) 2024-12-03T15:22:30,594 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/d3397f3b0fd54ccb9c929da8706c0309 2024-12-03T15:22:30,596 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#411 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:30,596 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/5992d1650e4b495a955eb39caf751d28 is 50, key is test_row_0/C:col10/1733239349431/Put/seqid=0 2024-12-03T15:22:30,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:30,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:30,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/71c0a82ea7c04eb9937ef50748087626 is 50, key is test_row_0/B:col10/1733239349490/Put/seqid=0 2024-12-03T15:22:30,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742321_1497 (size=12189) 2024-12-03T15:22:30,649 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/5992d1650e4b495a955eb39caf751d28 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/5992d1650e4b495a955eb39caf751d28 2024-12-03T15:22:30,655 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 5992d1650e4b495a955eb39caf751d28(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:30,655 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:30,655 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=12, startTime=1733239350407; duration=0sec 2024-12-03T15:22:30,655 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:30,655 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:30,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742322_1498 (size=12151) 2024-12-03T15:22:30,672 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/71c0a82ea7c04eb9937ef50748087626 2024-12-03T15:22:30,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239410671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239410671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239410673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/2ad28278ffba4857a842160fbb2c212f is 50, key is test_row_0/C:col10/1733239349490/Put/seqid=0 2024-12-03T15:22:30,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742323_1499 (size=12151) 2024-12-03T15:22:30,730 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/2ad28278ffba4857a842160fbb2c212f 2024-12-03T15:22:30,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/d3397f3b0fd54ccb9c929da8706c0309 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309 2024-12-03T15:22:30,750 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309, entries=150, sequenceid=153, filesize=11.9 K 2024-12-03T15:22:30,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/71c0a82ea7c04eb9937ef50748087626 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626 2024-12-03T15:22:30,756 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626, entries=150, sequenceid=153, filesize=11.9 K 2024-12-03T15:22:30,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/2ad28278ffba4857a842160fbb2c212f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f 2024-12-03T15:22:30,761 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f, entries=150, sequenceid=153, filesize=11.9 K 2024-12-03T15:22:30,762 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6c345cf4429e47e0b5ec5adba6afb04a in 240ms, sequenceid=153, compaction requested=false 2024-12-03T15:22:30,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:30,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:30,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-03T15:22:30,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-03T15:22:30,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-03T15:22:30,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0270 sec 2024-12-03T15:22:30,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.0390 sec 2024-12-03T15:22:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:30,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:30,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:30,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/afb664162bca463cbe766f8396c1fd9c is 50, key is test_row_0/A:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:30,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742324_1500 (size=14541) 2024-12-03T15:22:30,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-03T15:22:30,842 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-03T15:22:30,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:30,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-03T15:22:30,844 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:30,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:30,845 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:30,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:30,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239410858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239410858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239410864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,906 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/cb5e833cdd21430ca49c2cb1b020b24f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/cb5e833cdd21430ca49c2cb1b020b24f 2024-12-03T15:22:30,914 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into cb5e833cdd21430ca49c2cb1b020b24f(size=11.9 K), total size for store is 23.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:30,914 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:30,914 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=12, startTime=1733239350407; duration=0sec 2024-12-03T15:22:30,914 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:30,914 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:30,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:30,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239410969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239410970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:30,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239410970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:30,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:31,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:31,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239411174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239411177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239411181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/afb664162bca463cbe766f8396c1fd9c 2024-12-03T15:22:31,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/6e71a8f6b086461fb4910a485f9fb85d is 50, key is test_row_0/B:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:31,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742325_1501 (size=12151) 2024-12-03T15:22:31,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/6e71a8f6b086461fb4910a485f9fb85d 2024-12-03T15:22:31,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e4408a45ffee4efb9111b0209b7d842f is 50, key is test_row_0/C:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:31,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742326_1502 (size=12151) 2024-12-03T15:22:31,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,324 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:31,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:31,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:31,478 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239411476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:31,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239411481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:31,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239411493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,632 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:31,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:31,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:31,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e4408a45ffee4efb9111b0209b7d842f 2024-12-03T15:22:31,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/afb664162bca463cbe766f8396c1fd9c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c 2024-12-03T15:22:31,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c, entries=200, sequenceid=168, filesize=14.2 K 2024-12-03T15:22:31,736 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/6e71a8f6b086461fb4910a485f9fb85d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d 2024-12-03T15:22:31,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d, entries=150, sequenceid=168, filesize=11.9 K 2024-12-03T15:22:31,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/e4408a45ffee4efb9111b0209b7d842f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f 2024-12-03T15:22:31,750 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f, entries=150, sequenceid=168, filesize=11.9 K 2024-12-03T15:22:31,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6c345cf4429e47e0b5ec5adba6afb04a in 963ms, sequenceid=168, compaction requested=true 2024-12-03T15:22:31,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:31,753 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:31,753 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:31,753 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38881 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:31,754 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:31,754 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:31,754 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:31,754 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1e4f059743bd47b686d69925436b58f2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=38.0 K 2024-12-03T15:22:31,754 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e4f059743bd47b686d69925436b58f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:31,754 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36491 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:31,754 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:31,754 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:31,755 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/cb5e833cdd21430ca49c2cb1b020b24f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.6 K 2024-12-03T15:22:31,755 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3397f3b0fd54ccb9c929da8706c0309, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733239349490 2024-12-03T15:22:31,755 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cb5e833cdd21430ca49c2cb1b020b24f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:31,755 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting afb664162bca463cbe766f8396c1fd9c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:31,755 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 71c0a82ea7c04eb9937ef50748087626, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733239349490 2024-12-03T15:22:31,757 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e71a8f6b086461fb4910a485f9fb85d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:31,780 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:31,780 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/c186b019a35c465382e0966f1aa2059d is 50, key is test_row_0/A:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:31,783 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#418 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:31,783 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e25c1f3e189d4a7091df89aacacef7d6 is 50, key is test_row_0/B:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:31,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:31,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:31,786 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:31,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:31,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742327_1503 (size=12391) 2024-12-03T15:22:31,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/ea4c5e470c354544bed404b68825bda5 is 50, key is test_row_0/A:col10/1733239350862/Put/seqid=0 2024-12-03T15:22:31,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742328_1504 (size=12391) 2024-12-03T15:22:31,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742329_1505 
(size=12151) 2024-12-03T15:22:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:31,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:32,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239412033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239412033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239412034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239412150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239412150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239412160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,228 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/c186b019a35c465382e0966f1aa2059d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/c186b019a35c465382e0966f1aa2059d 2024-12-03T15:22:32,233 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e25c1f3e189d4a7091df89aacacef7d6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e25c1f3e189d4a7091df89aacacef7d6 2024-12-03T15:22:32,239 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/ea4c5e470c354544bed404b68825bda5 2024-12-03T15:22:32,258 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into c186b019a35c465382e0966f1aa2059d(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:32,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:32,258 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=13, startTime=1733239351753; duration=0sec 2024-12-03T15:22:32,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:32,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:32,258 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:32,274 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36491 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:32,274 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:32,274 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:32,274 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/5992d1650e4b495a955eb39caf751d28, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.6 K 2024-12-03T15:22:32,278 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5992d1650e4b495a955eb39caf751d28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733239348296 2024-12-03T15:22:32,282 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ad28278ffba4857a842160fbb2c212f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733239349490 2024-12-03T15:22:32,282 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into e25c1f3e189d4a7091df89aacacef7d6(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:32,282 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:32,282 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=13, startTime=1733239351753; duration=0sec 2024-12-03T15:22:32,282 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:32,282 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:32,286 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4408a45ffee4efb9111b0209b7d842f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:32,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8bac8f5861e948d48a4933aab839f58a is 50, key is test_row_0/B:col10/1733239350862/Put/seqid=0 2024-12-03T15:22:32,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742330_1506 (size=12151) 2024-12-03T15:22:32,326 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8bac8f5861e948d48a4933aab839f58a 2024-12-03T15:22:32,348 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#421 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:32,349 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/946b4379ae474add81e220b664f4de70 is 50, key is test_row_0/C:col10/1733239350787/Put/seqid=0 2024-12-03T15:22:32,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3d663543847a45ee8062df7f0eba326d is 50, key is test_row_0/C:col10/1733239350862/Put/seqid=0 2024-12-03T15:22:32,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239412362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239412368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239412378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742331_1507 (size=12391) 2024-12-03T15:22:32,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742332_1508 (size=12151) 2024-12-03T15:22:32,437 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3d663543847a45ee8062df7f0eba326d 2024-12-03T15:22:32,453 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/946b4379ae474add81e220b664f4de70 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/946b4379ae474add81e220b664f4de70 2024-12-03T15:22:32,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/ea4c5e470c354544bed404b68825bda5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5 2024-12-03T15:22:32,466 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 946b4379ae474add81e220b664f4de70(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:32,466 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:32,466 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=13, startTime=1733239351753; duration=0sec 2024-12-03T15:22:32,466 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:32,466 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:32,478 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5, entries=150, sequenceid=192, filesize=11.9 K 2024-12-03T15:22:32,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8bac8f5861e948d48a4933aab839f58a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a 2024-12-03T15:22:32,520 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a, entries=150, sequenceid=192, filesize=11.9 K 2024-12-03T15:22:32,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/3d663543847a45ee8062df7f0eba326d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d 2024-12-03T15:22:32,543 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d, entries=150, sequenceid=192, filesize=11.9 K 2024-12-03T15:22:32,548 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 6c345cf4429e47e0b5ec5adba6afb04a in 762ms, sequenceid=192, compaction requested=false 2024-12-03T15:22:32,548 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:32,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:32,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-03T15:22:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-03T15:22:32,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-03T15:22:32,569 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7130 sec 2024-12-03T15:22:32,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.7260 sec 2024-12-03T15:22:32,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:32,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:32,694 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:32,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3e1befccc5454b41bf2ccae50c175c3c is 50, key is test_row_0/A:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:32,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742333_1509 (size=12151) 2024-12-03T15:22:32,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3e1befccc5454b41bf2ccae50c175c3c 2024-12-03T15:22:32,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/52cca6002a0540ada528ee42ebbbd34a is 50, key is test_row_0/B:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:32,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239412794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742334_1510 (size=12151) 2024-12-03T15:22:32,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239412810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239412814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239412910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239412927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:32,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239412930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:32,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-03T15:22:32,950 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-03T15:22:32,953 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:32,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-03T15:22:32,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-03T15:22:32,966 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:32,966 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:32,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:33,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=122 2024-12-03T15:22:33,122 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-03T15:22:33,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:33,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:33,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:33,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:33,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
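Note: the repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting puts while the region's memstore is over its blocking size (the "Over memstore limit=512.0 K" figure, typically the configured flush size times hbase.hregion.memstore.block.multiplier). Purely as an illustration, not the test's actual client code, the sketch below catches the exception and backs off before retrying; it assumes hbase.client.retries.number is dialed down far enough that the exception reaches the caller instead of being wrapped by the client's own retry machinery.

```java
// Hypothetical client-side retry loop for the RegionTooBusyException seen above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // rejected while the memstore is over its blocking size
          return;
        } catch (RegionTooBusyException busy) {
          // The region server refused the write; wait for the in-progress flush
          // to drain the memstore, then try again with exponential backoff.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```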
2024-12-03T15:22:33,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:33,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239413126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239413141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239413146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/52cca6002a0540ada528ee42ebbbd34a 2024-12-03T15:22:33,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/123b9f93249e42f2ba1748f16407ccc5 is 50, key is test_row_0/C:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:33,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-03T15:22:33,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-03T15:22:33,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:33,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:33,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
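Note: the pid=122/123 records show the master-side flow behind the test's flush request: the master logs "Client=jenkins ... flush TestAcidGuarantees", stores a FlushTableProcedure, and dispatches a per-region FlushRegionProcedure to the region server, which answers "NOT flushing ... as already flushing" and fails the remote call so the master retries it. On the client side that request is most likely nothing more than an Admin.flush call, roughly as sketched below.

```java
// Minimal sketch of the client call that drives the FLUSH procedures above;
// the table name is taken from the log, everything else is boilerplate.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into a FlushTableProcedure (pid=122 above) and
      // dispatches one FlushRegionProcedure per region (pid=123 above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```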
2024-12-03T15:22:33,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:33,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:33,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742335_1511 (size=12151) 2024-12-03T15:22:33,318 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/123b9f93249e42f2ba1748f16407ccc5 2024-12-03T15:22:33,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3e1befccc5454b41bf2ccae50c175c3c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c 2024-12-03T15:22:33,360 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c, entries=150, sequenceid=211, filesize=11.9 K 2024-12-03T15:22:33,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/52cca6002a0540ada528ee42ebbbd34a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a 2024-12-03T15:22:33,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a, entries=150, sequenceid=211, filesize=11.9 K 2024-12-03T15:22:33,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/123b9f93249e42f2ba1748f16407ccc5 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5 2024-12-03T15:22:33,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5, entries=150, sequenceid=211, filesize=11.9 K 2024-12-03T15:22:33,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 6c345cf4429e47e0b5ec5adba6afb04a in 688ms, sequenceid=211, compaction requested=true 2024-12-03T15:22:33,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:33,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:33,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:33,375 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:33,375 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:33,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:33,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:33,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:33,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:33,376 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36693 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:33,376 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor 
compaction (all files) 2024-12-03T15:22:33,376 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:33,376 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/c186b019a35c465382e0966f1aa2059d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.8 K 2024-12-03T15:22:33,377 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36693 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:33,377 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:33,377 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
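Note: the compaction selection messages ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy line) are shaped by a handful of store-level settings. The sketch below names the configuration keys that most plausibly correspond to what the log shows (the minimum file count that triggers a minor compaction, the per-compaction file cap, and the blocking store-file count); the values are the usual defaults and are an assumption here, not values read from this test run.

```java
// Store-level compaction knobs that plausibly shape the selection messages above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minor compaction is considered once this many eligible store files exist
    // ("3 eligible" in the log, after three flushes per store).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on how many files one minor compaction may fold together.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to the store are delayed once this many store files pile up
    // (likely the "16 blocking" figure in the selection message).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compactionThreshold = " + conf.get("hbase.hstore.compactionThreshold"));
  }
}
```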
2024-12-03T15:22:33,377 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e25c1f3e189d4a7091df89aacacef7d6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.8 K 2024-12-03T15:22:33,377 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting c186b019a35c465382e0966f1aa2059d, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:33,378 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e25c1f3e189d4a7091df89aacacef7d6, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:33,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea4c5e470c354544bed404b68825bda5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733239350853 2024-12-03T15:22:33,378 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bac8f5861e948d48a4933aab839f58a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733239350853 2024-12-03T15:22:33,378 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e1befccc5454b41bf2ccae50c175c3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:33,379 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 52cca6002a0540ada528ee42ebbbd34a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:33,391 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:33,392 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/96896503477a4a0bbf59dc880c6c9d3f is 50, key is test_row_0/B:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:33,400 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:33,400 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/ad9bc6a57bad4e469056112eaca888c8 is 50, key is test_row_0/A:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:33,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-03T15:22:33,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:33,434 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:33,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742336_1512 (size=12493) 2024-12-03T15:22:33,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3df4ea412a7a4e6ea997a215230656f2 is 50, key is test_row_0/A:col10/1733239352801/Put/seqid=0 2024-12-03T15:22:33,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742337_1513 (size=12493) 2024-12-03T15:22:33,466 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/ad9bc6a57bad4e469056112eaca888c8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ad9bc6a57bad4e469056112eaca888c8 2024-12-03T15:22:33,472 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into ad9bc6a57bad4e469056112eaca888c8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:33,472 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:33,472 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=13, startTime=1733239353375; duration=0sec 2024-12-03T15:22:33,472 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:33,472 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:33,472 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:33,472 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/96896503477a4a0bbf59dc880c6c9d3f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/96896503477a4a0bbf59dc880c6c9d3f 2024-12-03T15:22:33,474 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36693 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:33,474 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:33,474 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
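Note: the flushed and compacted HFiles all carry cells such as test_row_0/A:col10, test_row_0/B:col10 and test_row_0/C:col10; TestAcidGuarantees writes the same batch to families A, B and C of a row, and its readers check that the families stay mutually consistent. As a hypothetical reader (not the test's own scanner), the sketch below fetches one row across the three families with a plain Get.

```java
// Hypothetical reader for one TestAcidGuarantees row across families A, B and C.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAcidRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result result = table.get(new Get(Bytes.toBytes("test_row_0")));
      for (String family : new String[] { "A", "B", "C" }) {
        byte[] value = result.getValue(Bytes.toBytes(family), Bytes.toBytes("col10"));
        // In an atomic world all three families report a value from the same writer batch.
        System.out.println(family + ":col10 = " + (value == null ? "null" : Bytes.toString(value)));
      }
    }
  }
}
```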
2024-12-03T15:22:33,474 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/946b4379ae474add81e220b664f4de70, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.8 K 2024-12-03T15:22:33,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:33,475 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 946b4379ae474add81e220b664f4de70, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733239350646 2024-12-03T15:22:33,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:33,476 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d663543847a45ee8062df7f0eba326d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1733239350853 2024-12-03T15:22:33,476 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 123b9f93249e42f2ba1748f16407ccc5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:33,478 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into 96896503477a4a0bbf59dc880c6c9d3f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:33,478 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:33,478 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=13, startTime=1733239353375; duration=0sec 2024-12-03T15:22:33,478 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:33,478 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:33,487 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:33,488 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/0e8b28b5f1e045b785ef096257a5e9d4 is 50, key is test_row_0/C:col10/1733239352005/Put/seqid=0 2024-12-03T15:22:33,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742338_1514 (size=12151) 2024-12-03T15:22:33,502 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3df4ea412a7a4e6ea997a215230656f2 2024-12-03T15:22:33,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239413517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/896844d8278849f39d3d98085a3727f0 is 50, key is test_row_0/B:col10/1733239352801/Put/seqid=0 2024-12-03T15:22:33,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239413528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239413529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742339_1515 (size=12493) 2024-12-03T15:22:33,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-03T15:22:33,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742340_1516 (size=12151) 2024-12-03T15:22:33,574 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/896844d8278849f39d3d98085a3727f0 2024-12-03T15:22:33,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/19be25d22a124b7491e30a091d17a239 is 50, key is test_row_0/C:col10/1733239352801/Put/seqid=0 2024-12-03T15:22:33,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239413630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239413636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239413636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742341_1517 (size=12151) 2024-12-03T15:22:33,666 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/19be25d22a124b7491e30a091d17a239 2024-12-03T15:22:33,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/3df4ea412a7a4e6ea997a215230656f2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2 2024-12-03T15:22:33,685 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2, entries=150, sequenceid=232, filesize=11.9 K 2024-12-03T15:22:33,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/896844d8278849f39d3d98085a3727f0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0 2024-12-03T15:22:33,690 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 
{event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0, entries=150, sequenceid=232, filesize=11.9 K 2024-12-03T15:22:33,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/19be25d22a124b7491e30a091d17a239 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239 2024-12-03T15:22:33,696 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239, entries=150, sequenceid=232, filesize=11.9 K 2024-12-03T15:22:33,697 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 6c345cf4429e47e0b5ec5adba6afb04a in 263ms, sequenceid=232, compaction requested=false 2024-12-03T15:22:33,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:33,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:33,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-03T15:22:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-03T15:22:33,700 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-03T15:22:33,700 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 733 msec 2024-12-03T15:22:33,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 748 msec 2024-12-03T15:22:33,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:33,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:22:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:33,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:33,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/35840e7ff84f45678984804bbd563392 is 50, key is test_row_0/A:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:33,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742342_1518 (size=16931) 2024-12-03T15:22:33,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/35840e7ff84f45678984804bbd563392 2024-12-03T15:22:33,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/53838be22efd45a98243fee452451f4a is 50, key is test_row_0/B:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:33,951 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/0e8b28b5f1e045b785ef096257a5e9d4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/0e8b28b5f1e045b785ef096257a5e9d4 2024-12-03T15:22:33,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239413936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 0e8b28b5f1e045b785ef096257a5e9d4(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:33,955 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:33,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=13, startTime=1733239353376; duration=0sec 2024-12-03T15:22:33,955 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:33,955 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:33,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239413947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239413955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742343_1519 (size=12151) 2024-12-03T15:22:33,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:33,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32788 deadline: 1733239413982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:33,986 DEBUG [Thread-2091 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:34,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32768 deadline: 1733239414000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,005 DEBUG [Thread-2087 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:34,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239414056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-03T15:22:34,070 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-03T15:22:34,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,072 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:34,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239414065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239414066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-03T15:22:34,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:34,075 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:34,075 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:34,075 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:34,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-03T15:22:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:34,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239414268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239414282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239414282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:34,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/53838be22efd45a98243fee452451f4a 2024-12-03T15:22:34,391 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-03T15:22:34,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:34,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:34,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:34,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:34,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b829e8aa022a4538b13f77516188aec1 is 50, key is test_row_0/C:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:34,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742344_1520 (size=12151) 2024-12-03T15:22:34,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b829e8aa022a4538b13f77516188aec1 2024-12-03T15:22:34,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/35840e7ff84f45678984804bbd563392 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392 2024-12-03T15:22:34,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392, entries=250, sequenceid=252, filesize=16.5 K 2024-12-03T15:22:34,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/53838be22efd45a98243fee452451f4a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a 2024-12-03T15:22:34,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a, entries=150, sequenceid=252, filesize=11.9 K 2024-12-03T15:22:34,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b829e8aa022a4538b13f77516188aec1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1 2024-12-03T15:22:34,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1, entries=150, sequenceid=252, filesize=11.9 K 2024-12-03T15:22:34,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 6c345cf4429e47e0b5ec5adba6afb04a in 689ms, sequenceid=252, compaction requested=true 2024-12-03T15:22:34,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:34,536 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:34,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:34,536 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:34,536 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:34,537 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41575 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:34,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:34,537 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:34,537 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 
6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:34,537 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:34,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:34,537 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ad9bc6a57bad4e469056112eaca888c8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=40.6 K 2024-12-03T15:22:34,537 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/96896503477a4a0bbf59dc880c6c9d3f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.9 K 2024-12-03T15:22:34,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 96896503477a4a0bbf59dc880c6c9d3f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:34,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 896844d8278849f39d3d98085a3727f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733239352763 2024-12-03T15:22:34,538 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 53838be22efd45a98243fee452451f4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353524 2024-12-03T15:22:34,539 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad9bc6a57bad4e469056112eaca888c8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:34,539 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3df4ea412a7a4e6ea997a215230656f2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=232, earliestPutTs=1733239352763 2024-12-03T15:22:34,540 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35840e7ff84f45678984804bbd563392, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353513 2024-12-03T15:22:34,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:34,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:34,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:34,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:34,548 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:34,549 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/2588173504844b27b17d2b30988d8b25 is 50, key is test_row_0/B:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:34,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,554 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
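The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting Mutate calls while the region's memstore sits over its blocking limit of 512.0 K. As a minimal sketch only (the exact settings used by this test run are not shown in the log), the limit is the product of two stock configuration keys; the values below are hypothetical ones that would reproduce the 512 K figure, assuming the default block multiplier of 4:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings chosen only to reproduce the 512.0 K limit seen in the log:
    // blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush threshold (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;
    // Writes to the region are rejected with RegionTooBusyException while the memstore
    // is above this limit, until MemStoreFlusher catches up.
    System.out.println("Writes block above " + (blockingLimit / 1024) + " K"); // prints 512 K here
  }
}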
2024-12-03T15:22:34,554 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:34,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:34,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:34,556 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#436 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:34,556 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/0f3e7c6f65334b73bd5f71228bb18c54 is 50, key is test_row_0/A:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:34,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/39ba66fb8a0c48b99a48fdfd3d523304 is 50, key is test_row_0/A:col10/1733239353944/Put/seqid=0 2024-12-03T15:22:34,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742345_1521 (size=12595) 2024-12-03T15:22:34,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:34,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:34,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742346_1522 (size=12595) 2024-12-03T15:22:34,603 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/0f3e7c6f65334b73bd5f71228bb18c54 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/0f3e7c6f65334b73bd5f71228bb18c54 2024-12-03T15:22:34,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742347_1523 (size=12301) 2024-12-03T15:22:34,624 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into 0f3e7c6f65334b73bd5f71228bb18c54(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:34,625 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:34,625 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=13, startTime=1733239354536; duration=0sec 2024-12-03T15:22:34,625 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:34,625 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:34,625 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:34,627 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:34,627 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:34,627 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
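The "Selecting compaction from 3 store files, 0 compacting, 3 eligible" lines show ExploringCompactionPolicy picking up all three flushed HFiles per store as soon as they accumulate. A rough sketch of the two knobs that drive that selection, using HBase's standard configuration keys and their shipped defaults (this run may override them; the log does not say):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files considered for one minor compaction.
    // With the default minimum of 3, three flushed HFiles per store are enough for the
    // policy to select an "all files" minor compaction, as seen for stores A, B and C above.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
    System.out.println("minor compaction triggers at >= " + minFiles
        + " files, compacts at most " + maxFiles + " per run");
  }
}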
2024-12-03T15:22:34,628 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/0e8b28b5f1e045b785ef096257a5e9d4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=35.9 K 2024-12-03T15:22:34,628 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e8b28b5f1e045b785ef096257a5e9d4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733239352005 2024-12-03T15:22:34,630 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19be25d22a124b7491e30a091d17a239, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733239352763 2024-12-03T15:22:34,638 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b829e8aa022a4538b13f77516188aec1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353524 2024-12-03T15:22:34,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239414650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239414654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239414658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:34,692 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:34,693 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/c6ad45b4d4a94f62a567601ee5e66fd6 is 50, key is test_row_0/C:col10/1733239353842/Put/seqid=0 2024-12-03T15:22:34,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742348_1524 (size=12595) 2024-12-03T15:22:34,757 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/c6ad45b4d4a94f62a567601ee5e66fd6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/c6ad45b4d4a94f62a567601ee5e66fd6 2024-12-03T15:22:34,762 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into c6ad45b4d4a94f62a567601ee5e66fd6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
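The remainder of the section is dominated by the same RegionTooBusyException being returned to the three client connections on their Mutate calls. As a rough illustration only (the actual TestAcidGuarantees writer threads are not shown in this log, and the stock client normally retries internally before surfacing the error), a caller hitting this condition might back off and retry along these lines:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of write as the Mutate calls in the log: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // hypothetical starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          // The region server rejected the write because the memstore is over its blocking
          // limit; wait for the in-flight flush to make progress, then try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}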
2024-12-03T15:22:34,762 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:34,762 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=13, startTime=1733239354542; duration=0sec 2024-12-03T15:22:34,762 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:34,762 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:34,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239414772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239414772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239414775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239414974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239414975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,992 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/2588173504844b27b17d2b30988d8b25 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/2588173504844b27b17d2b30988d8b25 2024-12-03T15:22:34,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:34,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239414987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:34,998 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into 2588173504844b27b17d2b30988d8b25(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:34,998 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:34,998 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=13, startTime=1733239354536; duration=0sec 2024-12-03T15:22:34,998 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:34,998 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:35,009 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/39ba66fb8a0c48b99a48fdfd3d523304 2024-12-03T15:22:35,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/88d31e482f0047c9b99edb45a1b7aae1 is 50, key is test_row_0/B:col10/1733239353944/Put/seqid=0 2024-12-03T15:22:35,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742349_1525 (size=12301) 2024-12-03T15:22:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:35,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239415284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239415285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239415302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,484 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/88d31e482f0047c9b99edb45a1b7aae1 2024-12-03T15:22:35,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/14a988eb774b4a30aed5fbb11051cd61 is 50, key is test_row_0/C:col10/1733239353944/Put/seqid=0 2024-12-03T15:22:35,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742350_1526 (size=12301) 2024-12-03T15:22:35,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239415790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239415791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:35,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239415812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:35,910 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/14a988eb774b4a30aed5fbb11051cd61 2024-12-03T15:22:35,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/39ba66fb8a0c48b99a48fdfd3d523304 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304 2024-12-03T15:22:35,923 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304, entries=150, sequenceid=272, filesize=12.0 K 2024-12-03T15:22:35,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/88d31e482f0047c9b99edb45a1b7aae1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1 2024-12-03T15:22:35,939 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1, entries=150, sequenceid=272, filesize=12.0 K 2024-12-03T15:22:35,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/14a988eb774b4a30aed5fbb11051cd61 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61 2024-12-03T15:22:35,959 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61, entries=150, sequenceid=272, filesize=12.0 K 2024-12-03T15:22:35,960 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 6c345cf4429e47e0b5ec5adba6afb04a in 1406ms, sequenceid=272, compaction requested=false 2024-12-03T15:22:35,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:35,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
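The rejected Mutate calls above all originate in HRegion.checkResources, which blocks writes once a region's memstore passes its blocking limit (reported here as 512.0 K) until the in-flight flush drains it. As a rough, hedged illustration of where that threshold typically comes from (the actual values are set by this test's configuration, which is not shown in this log), the blocking limit is usually the memstore flush size multiplied by the block multiplier:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults shown here are the stock values; the test presumably overrides the
    // flush size to something much smaller to exercise this back-pressure path.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * blockMultiplier;
    System.out.printf("writes are rejected once a region's memstore exceeds ~%d bytes%n", blockingLimit);
  }
}
```

With stock defaults (128 MB x 4) the limit would be far larger than 512 K, which suggests the test deliberately shrinks the flush size so that RegionTooBusyException is hit quickly under concurrent writers.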
2024-12-03T15:22:35,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-03T15:22:35,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-03T15:22:35,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-03T15:22:35,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8890 sec 2024-12-03T15:22:35,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.9020 sec 2024-12-03T15:22:36,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-03T15:22:36,185 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-03T15:22:36,186 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:36,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-03T15:22:36,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-03T15:22:36,187 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:36,188 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:36,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:36,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-03T15:22:36,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
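The FLUSH operations that keep appearing as FlushTableProcedure/FlushRegionProcedure pairs (pid=124/125 above, then 126/127) are driven from the test client through the Admin API. A minimal sketch of issuing such a flush, assuming a running cluster and a default configuration on the classpath (this is not the test harness's actual code), could look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; this is what shows up in the
      // log as a stored FlushTableProcedure and the "Operation: FLUSH ... completed" record.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```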
2024-12-03T15:22:36,340 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:36,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6741fdd585e345ce9977930bc6f13bca is 50, key is test_row_0/A:col10/1733239354649/Put/seqid=0 2024-12-03T15:22:36,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742351_1527 (size=12301) 2024-12-03T15:22:36,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-03T15:22:36,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-03T15:22:36,796 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6741fdd585e345ce9977930bc6f13bca 2024-12-03T15:22:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:36,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:36,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/9b365b8b56664d0eb524dd8f2c34688f is 50, key is test_row_0/B:col10/1733239354649/Put/seqid=0 2024-12-03T15:22:36,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742352_1528 (size=12301) 2024-12-03T15:22:36,825 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/9b365b8b56664d0eb524dd8f2c34688f 2024-12-03T15:22:36,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/13d13d86628e4ca19156bf6e752caec7 is 50, key is test_row_0/C:col10/1733239354649/Put/seqid=0 2024-12-03T15:22:36,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742353_1529 (size=12301) 2024-12-03T15:22:36,836 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/13d13d86628e4ca19156bf6e752caec7 2024-12-03T15:22:36,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6741fdd585e345ce9977930bc6f13bca as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca 2024-12-03T15:22:36,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239416839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239416840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239416842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,849 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:22:36,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/9b365b8b56664d0eb524dd8f2c34688f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f 2024-12-03T15:22:36,853 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:22:36,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/13d13d86628e4ca19156bf6e752caec7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7 2024-12-03T15:22:36,858 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7, entries=150, sequenceid=292, filesize=12.0 K 2024-12-03T15:22:36,859 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6c345cf4429e47e0b5ec5adba6afb04a in 519ms, sequenceid=292, compaction requested=true 2024-12-03T15:22:36,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:36,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:36,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-03T15:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-03T15:22:36,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-03T15:22:36,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 672 msec 2024-12-03T15:22:36,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 676 msec 2024-12-03T15:22:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:36,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-03T15:22:36,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:36,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:36,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:36,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:36,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/14fe10421c074594bad9bde413514e3d is 50, key is test_row_0/A:col10/1733239356945/Put/seqid=0 
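While the flush for pid=127 was running, the handlers above kept rejecting writes with RegionTooBusyException until the memstore dropped back under the limit. The standard HBase client normally absorbs these rejections through its internal retry and backoff machinery; purely as an illustration of what handling that pushback explicitly might look like (hypothetical row and value data, simplified error handling, not the test's code), a retry loop could be written as:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;               // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                 // may be rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // The region server is pushing back; wait for the flush to drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                 // exponential backoff before the next attempt
        }
      }
    }
  }
}
```

Depending on the client's retry settings, the busy-region error may instead surface wrapped in a retries-exhausted exception rather than as a bare RegionTooBusyException, so the catch clause above is only a sketch.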
2024-12-03T15:22:36,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742354_1530 (size=14741) 2024-12-03T15:22:36,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239416975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239416976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:36,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239416976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239417085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239417085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239417086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-03T15:22:37,294 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-03T15:22:37,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239417294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239417298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239417298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:37,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-03T15:22:37,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:37,321 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:37,323 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:37,323 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:37,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/14fe10421c074594bad9bde413514e3d 2024-12-03T15:22:37,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/061332ee317e4a24abfaaf3e6c468893 is 50, key is test_row_0/B:col10/1733239356945/Put/seqid=0 2024-12-03T15:22:37,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:37,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742355_1531 (size=12301) 2024-12-03T15:22:37,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/061332ee317e4a24abfaaf3e6c468893 2024-12-03T15:22:37,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b058876b00ca4e5fb030af26d2a64ae0 is 50, key is test_row_0/C:col10/1733239356945/Put/seqid=0 2024-12-03T15:22:37,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742356_1532 (size=12301) 2024-12-03T15:22:37,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b058876b00ca4e5fb030af26d2a64ae0 2024-12-03T15:22:37,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/14fe10421c074594bad9bde413514e3d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d 2024-12-03T15:22:37,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d, entries=200, sequenceid=313, filesize=14.4 K 2024-12-03T15:22:37,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/061332ee317e4a24abfaaf3e6c468893 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893 2024-12-03T15:22:37,480 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-03T15:22:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:37,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:37,484 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893, entries=150, sequenceid=313, filesize=12.0 K 2024-12-03T15:22:37,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b058876b00ca4e5fb030af26d2a64ae0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0 2024-12-03T15:22:37,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0, entries=150, sequenceid=313, filesize=12.0 K 2024-12-03T15:22:37,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6c345cf4429e47e0b5ec5adba6afb04a in 542ms, sequenceid=313, compaction requested=true 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:37,488 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:37,488 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:37,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:37,490 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:37,490 DEBUG 
[RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:37,490 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,490 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/2588173504844b27b17d2b30988d8b25, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=48.3 K 2024-12-03T15:22:37,490 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51938 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:37,490 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2588173504844b27b17d2b30988d8b25, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353524 2024-12-03T15:22:37,490 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:37,490 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
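The sizes in these records are binary kilobytes, which can be confirmed from the raw byte counts the log also prints (123660 bytes flushed; 49498 and 51938 bytes for the B and A compaction selections). A quick check using only numbers taken from the lines above:

    public class SizeCheck {
      public static void main(String[] args) {
        long flushedBytes = 123660L; // logged as "dataSize ~120.76 KB/123660"
        long bSelection   = 49498L;  // logged as "4 files of size 49498" -> totalSize=48.3 K
        long aSelection   = 51938L;  // logged as "4 files of size 51938" -> totalSize=50.7 K
        System.out.printf("%.2f KB%n", flushedBytes / 1024.0); // prints 120.76
        System.out.printf("%.1f KB%n", bSelection / 1024.0);   // prints 48.3
        System.out.printf("%.1f KB%n", aSelection / 1024.0);   // prints 50.7
      }
    }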
2024-12-03T15:22:37,491 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/0f3e7c6f65334b73bd5f71228bb18c54, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=50.7 K 2024-12-03T15:22:37,491 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 88d31e482f0047c9b99edb45a1b7aae1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733239353921 2024-12-03T15:22:37,491 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f3e7c6f65334b73bd5f71228bb18c54, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353524 2024-12-03T15:22:37,492 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b365b8b56664d0eb524dd8f2c34688f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733239354622 2024-12-03T15:22:37,492 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39ba66fb8a0c48b99a48fdfd3d523304, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733239353921 2024-12-03T15:22:37,492 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 061332ee317e4a24abfaaf3e6c468893, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:37,492 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6741fdd585e345ce9977930bc6f13bca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733239354622 2024-12-03T15:22:37,493 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14fe10421c074594bad9bde413514e3d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:37,509 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:37,509 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/fd002742156c46ad951dc3227981e9a1 is 50, key is test_row_0/A:col10/1733239356945/Put/seqid=0 2024-12-03T15:22:37,511 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:37,512 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/9723ab10b1dd4b569852e64fd9dd7550 is 50, key is test_row_0/B:col10/1733239356945/Put/seqid=0 2024-12-03T15:22:37,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742357_1533 (size=12881) 2024-12-03T15:22:37,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742358_1534 (size=12881) 2024-12-03T15:22:37,525 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/9723ab10b1dd4b569852e64fd9dd7550 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9723ab10b1dd4b569852e64fd9dd7550 2024-12-03T15:22:37,530 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into 9723ab10b1dd4b569852e64fd9dd7550(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
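The throttle.PressureAwareThroughputController records report each compaction's observed write rate (3.28 and 6.55 MB/second) against the 50.00 MB/second limit; since the limit was never exceeded, the controller slept 0 times. A very rough sketch of that style of rate throttling, shown here only to illustrate the idea (this is not the actual HBase class), is to sleep whenever the bytes written so far outpace the configured rate:

    /** Rough sketch of rate-based write throttling; not the HBase implementation. */
    public class SimpleThroughputLimiter {
      private final double limitBytesPerSec;
      private final long start = System.nanoTime();
      private long bytesWritten = 0;

      public SimpleThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
      }

      /** Call after writing `bytes`; sleeps if the running rate exceeds the limit. */
      public void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - start) / 1e9;
        double minSecNeeded = bytesWritten / limitBytesPerSec; // time the limit allows for this many bytes
        if (minSecNeeded > elapsedSec) {
          Thread.sleep((long) ((minSecNeeded - elapsedSec) * 1000)); // throttle the writer
        }
      }
    }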
2024-12-03T15:22:37,530 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:37,530 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=12, startTime=1733239357488; duration=0sec 2024-12-03T15:22:37,530 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:37,530 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:37,531 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:37,532 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:37,532 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:37,532 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,532 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/c6ad45b4d4a94f62a567601ee5e66fd6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=48.3 K 2024-12-03T15:22:37,532 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c6ad45b4d4a94f62a567601ee5e66fd6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239353524 2024-12-03T15:22:37,533 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 14a988eb774b4a30aed5fbb11051cd61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733239353921 2024-12-03T15:22:37,533 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 13d13d86628e4ca19156bf6e752caec7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=292, earliestPutTs=1733239354622 2024-12-03T15:22:37,533 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b058876b00ca4e5fb030af26d2a64ae0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:37,540 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:37,540 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f951de277d6a4f8fb1c7313ab0c58840 is 50, key is test_row_0/C:col10/1733239356945/Put/seqid=0 2024-12-03T15:22:37,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742359_1535 (size=12881) 2024-12-03T15:22:37,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:37,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:37,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:37,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/2632635d010e4648ac4e6a2bb2593889 is 50, key is test_row_0/A:col10/1733239357608/Put/seqid=0 2024-12-03T15:22:37,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742360_1536 (size=12301) 2024-12-03T15:22:37,626 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/2632635d010e4648ac4e6a2bb2593889 2024-12-03T15:22:37,633 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-03T15:22:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:37,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,633 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
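The HRegion(2496) message explains why the remote procedure keeps failing: MemStoreFlusher.0 already has a flush of this region in flight, so the FlushRegionCallable refuses to start a second one and surfaces an IOException instead. The guard itself is a common fail-fast pattern; a stand-alone illustration with an atomic flag (not the HRegion code) might look like:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    /** Illustration of a "skip if already flushing" guard; not the HRegion code. */
    public class FlushGuard {
      private final AtomicBoolean flushing = new AtomicBoolean(false);

      public void requestFlush() throws IOException {
        // Only one flush may run at a time; a second request fails fast
        // instead of queueing, mirroring "NOT flushing ... as already flushing".
        if (!flushing.compareAndSet(false, true)) {
          throw new IOException("Unable to complete flush: already flushing");
        }
        try {
          doFlush();
        } finally {
          flushing.set(false);
        }
      }

      private void doFlush() {
        // write the memstore snapshot out to store files (elided)
      }
    }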
2024-12-03T15:22:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cba4e811fb34abd84517cc3e863144f is 50, key is test_row_0/B:col10/1733239357608/Put/seqid=0 2024-12-03T15:22:37,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742361_1537 (size=12301) 2024-12-03T15:22:37,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239417653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239417655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239417655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239417762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239417763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239417766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,785 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-03T15:22:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,786 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:37,926 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/fd002742156c46ad951dc3227981e9a1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/fd002742156c46ad951dc3227981e9a1 2024-12-03T15:22:37,934 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into fd002742156c46ad951dc3227981e9a1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:37,934 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:37,934 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=12, startTime=1733239357488; duration=0sec 2024-12-03T15:22:37,934 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:37,934 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:37,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-03T15:22:37,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:37,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:37,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:37,973 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f951de277d6a4f8fb1c7313ab0c58840 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f951de277d6a4f8fb1c7313ab0c58840 2024-12-03T15:22:37,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239417967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239417969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:37,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239417974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:37,991 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into f951de277d6a4f8fb1c7313ab0c58840(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:37,991 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:37,991 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=12, startTime=1733239357488; duration=0sec 2024-12-03T15:22:37,992 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:37,992 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:38,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cba4e811fb34abd84517cc3e863144f 2024-12-03T15:22:38,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/184398c37ed14e52add8d423ba976e49 is 50, key is test_row_0/C:col10/1733239357608/Put/seqid=0 2024-12-03T15:22:38,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742362_1538 (size=12301) 2024-12-03T15:22:38,101 DEBUG 
[RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-03T15:22:38,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:38,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:38,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:38,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:38,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
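The 512.0 K figure in the RegionTooBusyException warnings above is the region's blocking memstore size: once a region's memstores exceed it, writes are rejected until a flush catches up. That limit is derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the concrete values below are only an assumption chosen to reproduce the 512 K limit (the log shows the resulting limit, not the test's configuration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: flushSize * multiplier = 512 KB, matching the
        // "Over memstore limit=512.0 K" warnings; the real test config is not visible here.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + (blocking / 1024) + " K"); // 512 K
      }
    }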
2024-12-03T15:22:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:38,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/184398c37ed14e52add8d423ba976e49 2024-12-03T15:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/2632635d010e4648ac4e6a2bb2593889 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889 2024-12-03T15:22:38,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889, entries=150, sequenceid=331, filesize=12.0 K 2024-12-03T15:22:38,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/8cba4e811fb34abd84517cc3e863144f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f 2024-12-03T15:22:38,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f, entries=150, sequenceid=331, filesize=12.0 K 2024-12-03T15:22:38,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/184398c37ed14e52add8d423ba976e49 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49 2024-12-03T15:22:38,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49, entries=150, sequenceid=331, filesize=12.0 K 2024-12-03T15:22:38,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 6c345cf4429e47e0b5ec5adba6afb04a in 516ms, sequenceid=331, compaction requested=false 2024-12-03T15:22:38,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:38,254 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=129 2024-12-03T15:22:38,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:38,254 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:38,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/786114cdf9b34076a4d12cc1e9fd6b92 is 50, key is test_row_0/A:col10/1733239357653/Put/seqid=0 2024-12-03T15:22:38,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:38,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:38,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742363_1539 (size=12301) 2024-12-03T15:22:38,286 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/786114cdf9b34076a4d12cc1e9fd6b92 2024-12-03T15:22:38,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/11357f67eed24ef1851c71464a611c64 is 50, key is test_row_0/B:col10/1733239357653/Put/seqid=0 2024-12-03T15:22:38,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742364_1540 (size=12301) 2024-12-03T15:22:38,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239418320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239418320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239418322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:38,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239418427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239418427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239418430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239418632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239418632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239418633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,710 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/11357f67eed24ef1851c71464a611c64 2024-12-03T15:22:38,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4ab3f4a546974257bcb1c8425de3041e is 50, key is test_row_0/C:col10/1733239357653/Put/seqid=0 2024-12-03T15:22:38,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742365_1541 (size=12301) 2024-12-03T15:22:38,758 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4ab3f4a546974257bcb1c8425de3041e 2024-12-03T15:22:38,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/786114cdf9b34076a4d12cc1e9fd6b92 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92 2024-12-03T15:22:38,768 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92, entries=150, sequenceid=352, filesize=12.0 K 2024-12-03T15:22:38,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/11357f67eed24ef1851c71464a611c64 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64 2024-12-03T15:22:38,773 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64, entries=150, sequenceid=352, filesize=12.0 K 2024-12-03T15:22:38,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4ab3f4a546974257bcb1c8425de3041e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e 2024-12-03T15:22:38,778 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e, entries=150, sequenceid=352, filesize=12.0 K 2024-12-03T15:22:38,778 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 6c345cf4429e47e0b5ec5adba6afb04a in 524ms, sequenceid=352, compaction requested=true 2024-12-03T15:22:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:38,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-03T15:22:38,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-03T15:22:38,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-03T15:22:38,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4570 sec 2024-12-03T15:22:38,782 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.4750 sec 2024-12-03T15:22:38,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:38,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:38,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:38,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/b68d0113e9a74091bb90b67bb94a77aa is 50, key is test_row_0/A:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:38,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742366_1542 (size=14741) 2024-12-03T15:22:38,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239418972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239418978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:38,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239418982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239419080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239419086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239419088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239419285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239419294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239419294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,350 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/b68d0113e9a74091bb90b67bb94a77aa 2024-12-03T15:22:39,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/156fcc85a15747db9d3f47776e5b2b38 is 50, key is test_row_0/B:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:39,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742367_1543 (size=12301) 2024-12-03T15:22:39,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/156fcc85a15747db9d3f47776e5b2b38 2024-12-03T15:22:39,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6c7c2c59a93e486c86716c9aa394de9a is 50, key is test_row_0/C:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:39,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742368_1544 (size=12301) 2024-12-03T15:22:39,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-03T15:22:39,418 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-03T15:22:39,419 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:39,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-03T15:22:39,421 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:39,421 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:39,421 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:39,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:39,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:39,573 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,573 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-03T15:22:39,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:39,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:39,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:39,574 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:39,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:39,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:39,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239419592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239419597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:39,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239419597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:39,726 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-03T15:22:39,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:39,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:39,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:39,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:39,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:39,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:39,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6c7c2c59a93e486c86716c9aa394de9a 2024-12-03T15:22:39,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/b68d0113e9a74091bb90b67bb94a77aa as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa 2024-12-03T15:22:39,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa, entries=200, sequenceid=370, filesize=14.4 K 2024-12-03T15:22:39,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/156fcc85a15747db9d3f47776e5b2b38 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38 2024-12-03T15:22:39,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38, entries=150, sequenceid=370, filesize=12.0 K 2024-12-03T15:22:39,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6c7c2c59a93e486c86716c9aa394de9a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a 2024-12-03T15:22:39,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a, entries=150, sequenceid=370, filesize=12.0 K 2024-12-03T15:22:39,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6c345cf4429e47e0b5ec5adba6afb04a in 852ms, sequenceid=370, compaction requested=true 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:39,793 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:39,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:39,793 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:39,794 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49784 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:39,794 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52224 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:39,794 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:39,794 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:39,795 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:39,795 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:39,795 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/fd002742156c46ad951dc3227981e9a1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=51 K 2024-12-03T15:22:39,795 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9723ab10b1dd4b569852e64fd9dd7550, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=48.6 K 2024-12-03T15:22:39,795 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd002742156c46ad951dc3227981e9a1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:39,795 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9723ab10b1dd4b569852e64fd9dd7550, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:39,795 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2632635d010e4648ac4e6a2bb2593889, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733239356962 2024-12-03T15:22:39,795 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cba4e811fb34abd84517cc3e863144f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733239356962 2024-12-03T15:22:39,795 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 11357f67eed24ef1851c71464a611c64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733239357635 2024-12-03T15:22:39,796 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
786114cdf9b34076a4d12cc1e9fd6b92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733239357635 2024-12-03T15:22:39,796 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 156fcc85a15747db9d3f47776e5b2b38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:39,796 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting b68d0113e9a74091bb90b67bb94a77aa, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:39,802 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:39,803 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e746e9be171c4427b71ce52c7965de9e is 50, key is test_row_0/B:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:39,807 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:39,808 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/7c1b719a86e54b03a5f7c8b7385bc47b is 50, key is test_row_0/A:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:39,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742369_1545 (size=13017) 2024-12-03T15:22:39,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742370_1546 (size=13017) 2024-12-03T15:22:39,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:39,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-03T15:22:39,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:39,879 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-03T15:22:39,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:39,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:39,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:39,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:39,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:39,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:39,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/28dae1e8074a46bc9f89e10bbb8ed664 is 50, key is test_row_0/A:col10/1733239358967/Put/seqid=0 2024-12-03T15:22:39,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742371_1547 (size=12301) 2024-12-03T15:22:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:40,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:40,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239420134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239420135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239420136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,219 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e746e9be171c4427b71ce52c7965de9e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e746e9be171c4427b71ce52c7965de9e 2024-12-03T15:22:40,221 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/7c1b719a86e54b03a5f7c8b7385bc47b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c1b719a86e54b03a5f7c8b7385bc47b 2024-12-03T15:22:40,224 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into e746e9be171c4427b71ce52c7965de9e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:40,225 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:40,225 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=12, startTime=1733239359793; duration=0sec 2024-12-03T15:22:40,225 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:40,225 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:40,225 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:22:40,227 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49784 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:22:40,227 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:40,227 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:40,227 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into 7c1b719a86e54b03a5f7c8b7385bc47b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:40,227 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:40,227 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f951de277d6a4f8fb1c7313ab0c58840, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=48.6 K 2024-12-03T15:22:40,227 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=12, startTime=1733239359793; duration=0sec 2024-12-03T15:22:40,227 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:40,227 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:40,228 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f951de277d6a4f8fb1c7313ab0c58840, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733239356819 2024-12-03T15:22:40,228 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 184398c37ed14e52add8d423ba976e49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733239356962 2024-12-03T15:22:40,229 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ab3f4a546974257bcb1c8425de3041e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733239357635 2024-12-03T15:22:40,229 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c7c2c59a93e486c86716c9aa394de9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:40,236 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:40,237 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/034ed1a61d984b81a4cbd1db5ad311f1 is 50, key is test_row_0/C:col10/1733239358319/Put/seqid=0 2024-12-03T15:22:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239420241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239420241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239420242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742372_1548 (size=13017) 2024-12-03T15:22:40,289 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/28dae1e8074a46bc9f89e10bbb8ed664 2024-12-03T15:22:40,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/ecdcd15edaa649858fb6cecaf7cc6f74 is 50, key is test_row_0/B:col10/1733239358967/Put/seqid=0 2024-12-03T15:22:40,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742373_1549 (size=12301) 2024-12-03T15:22:40,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239420445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239420445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239420446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:40,659 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/034ed1a61d984b81a4cbd1db5ad311f1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/034ed1a61d984b81a4cbd1db5ad311f1 2024-12-03T15:22:40,663 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 034ed1a61d984b81a4cbd1db5ad311f1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:40,663 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:40,663 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=12, startTime=1733239359793; duration=0sec 2024-12-03T15:22:40,663 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:40,663 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:40,701 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/ecdcd15edaa649858fb6cecaf7cc6f74 2024-12-03T15:22:40,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/56c5dd8d0a604d8c8473545c950f3ce4 is 50, key is test_row_0/C:col10/1733239358967/Put/seqid=0 2024-12-03T15:22:40,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742374_1550 (size=12301) 2024-12-03T15:22:40,727 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/56c5dd8d0a604d8c8473545c950f3ce4 2024-12-03T15:22:40,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/28dae1e8074a46bc9f89e10bbb8ed664 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664 2024-12-03T15:22:40,738 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664, entries=150, sequenceid=388, filesize=12.0 K 2024-12-03T15:22:40,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/ecdcd15edaa649858fb6cecaf7cc6f74 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74 2024-12-03T15:22:40,744 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74, entries=150, sequenceid=388, filesize=12.0 K 2024-12-03T15:22:40,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/56c5dd8d0a604d8c8473545c950f3ce4 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4 2024-12-03T15:22:40,749 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4, entries=150, sequenceid=388, filesize=12.0 K 2024-12-03T15:22:40,750 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 6c345cf4429e47e0b5ec5adba6afb04a in 871ms, sequenceid=388, compaction requested=false 2024-12-03T15:22:40,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:40,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:40,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-03T15:22:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-03T15:22:40,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-03T15:22:40,752 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3300 sec 2024-12-03T15:22:40,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.3340 sec 2024-12-03T15:22:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:40,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:40,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:40,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6a5d3aa432e74f63a9341298e437053f is 50, key is test_row_0/A:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:40,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742375_1551 (size=17181) 2024-12-03T15:22:40,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239420779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239420780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239420784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239420885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239420885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:40,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:40,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239420888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239421089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239421090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239421093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6a5d3aa432e74f63a9341298e437053f 2024-12-03T15:22:41,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/72936b7bc48b4e7d8add9de37eca009c is 50, key is test_row_0/B:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:41,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742376_1552 (size=12301) 2024-12-03T15:22:41,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/72936b7bc48b4e7d8add9de37eca009c 2024-12-03T15:22:41,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f9df8eb10bb644c9875cd4012e972fef is 50, key is test_row_0/C:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742377_1553 (size=12301) 2024-12-03T15:22:41,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239421395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239421395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239421399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-03T15:22:41,524 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-03T15:22:41,526 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:41,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-03T15:22:41,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-03T15:22:41,527 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:41,528 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:41,528 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:41,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f9df8eb10bb644c9875cd4012e972fef 2024-12-03T15:22:41,600 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/6a5d3aa432e74f63a9341298e437053f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f 2024-12-03T15:22:41,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f, entries=250, sequenceid=411, filesize=16.8 K 2024-12-03T15:22:41,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/72936b7bc48b4e7d8add9de37eca009c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c 2024-12-03T15:22:41,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c, entries=150, sequenceid=411, filesize=12.0 K 2024-12-03T15:22:41,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/f9df8eb10bb644c9875cd4012e972fef as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef 2024-12-03T15:22:41,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef, entries=150, sequenceid=411, filesize=12.0 K 2024-12-03T15:22:41,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 6c345cf4429e47e0b5ec5adba6afb04a in 866ms, sequenceid=411, compaction requested=true 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:41,622 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:41,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:41,622 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:41,623 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42499 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:41,623 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:41,623 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:41,623 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:41,624 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c1b719a86e54b03a5f7c8b7385bc47b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=41.5 K 2024-12-03T15:22:41,624 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:41,624 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:41,624 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e746e9be171c4427b71ce52c7965de9e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=36.7 K 2024-12-03T15:22:41,624 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c1b719a86e54b03a5f7c8b7385bc47b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:41,624 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting e746e9be171c4427b71ce52c7965de9e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:41,624 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28dae1e8074a46bc9f89e10bbb8ed664, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733239358949 2024-12-03T15:22:41,625 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting ecdcd15edaa649858fb6cecaf7cc6f74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733239358949 2024-12-03T15:22:41,625 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a5d3aa432e74f63a9341298e437053f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360122 2024-12-03T15:22:41,625 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 72936b7bc48b4e7d8add9de37eca009c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360134 2024-12-03T15:22:41,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-03T15:22:41,631 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:41,631 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/177f6a3854a24f2e8fcfe46cb29a346e is 50, key is test_row_0/B:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:41,632 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:41,632 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/e617896ef87e408b80d59be3e3349adb is 50, key is test_row_0/A:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:41,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742378_1554 (size=13119) 2024-12-03T15:22:41,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742379_1555 (size=13119) 2024-12-03T15:22:41,679 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:41,680 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:41,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:41,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/f35f60ee5d7c48b59ec8c557e9b77a37 is 50, key is test_row_0/A:col10/1733239360783/Put/seqid=0 2024-12-03T15:22:41,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742380_1556 (size=12301) 2024-12-03T15:22:41,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-03T15:22:41,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:41,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:41,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239421936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239421941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:41,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:41,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239421942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,038 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/177f6a3854a24f2e8fcfe46cb29a346e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/177f6a3854a24f2e8fcfe46cb29a346e 2024-12-03T15:22:42,042 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/e617896ef87e408b80d59be3e3349adb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/e617896ef87e408b80d59be3e3349adb 2024-12-03T15:22:42,043 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into 177f6a3854a24f2e8fcfe46cb29a346e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:42,043 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:42,043 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=13, startTime=1733239361622; duration=0sec 2024-12-03T15:22:42,043 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:42,043 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:42,044 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:42,044 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:42,044 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:42,045 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:42,045 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/034ed1a61d984b81a4cbd1db5ad311f1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=36.7 K 2024-12-03T15:22:42,045 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 034ed1a61d984b81a4cbd1db5ad311f1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1733239358303 2024-12-03T15:22:42,046 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 56c5dd8d0a604d8c8473545c950f3ce4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733239358949 2024-12-03T15:22:42,046 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f9df8eb10bb644c9875cd4012e972fef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360134 2024-12-03T15:22:42,046 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into e617896ef87e408b80d59be3e3349adb(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:42,046 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:42,046 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=13, startTime=1733239361622; duration=0sec 2024-12-03T15:22:42,046 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:42,047 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:42,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239422043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,052 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:42,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239422048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,053 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/ee7df67e857849b3a0d3157d5d69f376 is 50, key is test_row_0/C:col10/1733239360755/Put/seqid=0 2024-12-03T15:22:42,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742381_1557 (size=13119) 2024-12-03T15:22:42,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239422051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,087 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/f35f60ee5d7c48b59ec8c557e9b77a37 2024-12-03T15:22:42,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/b0446269412a4463987ddd6910aa1c94 is 50, key is test_row_0/B:col10/1733239360783/Put/seqid=0 2024-12-03T15:22:42,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742382_1558 (size=12301) 2024-12-03T15:22:42,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-03T15:22:42,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239422250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239422254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239422258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,460 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/ee7df67e857849b3a0d3157d5d69f376 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/ee7df67e857849b3a0d3157d5d69f376 2024-12-03T15:22:42,464 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into ee7df67e857849b3a0d3157d5d69f376(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
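The ExploringCompactionPolicy lines above report that three roughly 12 K store files were "in ratio" and were therefore selected for a minor compaction. The snippet below is a simplified, hedged restatement of that size-ratio test (each file must be no larger than the configured ratio times the combined size of the other candidates); it is not the actual HBase policy code, and the 1.2 ratio is only the commonly used default, assumed here.

// Hedged illustration of the size-ratio check behind the "in ratio" message.
// This is a simplified re-statement, not ExploringCompactionPolicy itself.
import java.util.List;

public class CompactionRatioCheck {
    /** True if every file is no larger than ratio times the rest of the selection. */
    static boolean isInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three ~12 K store files selected in the log (sizes in bytes, approximate).
        List<Long> candidate = List.of(13000L, 12300L, 12300L);
        System.out.println(isInRatio(candidate, 1.2)); // prints true
    }
}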
2024-12-03T15:22:42,464 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:42,464 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=13, startTime=1733239361622; duration=0sec 2024-12-03T15:22:42,464 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:42,464 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:42,497 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/b0446269412a4463987ddd6910aa1c94 2024-12-03T15:22:42,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4a7ad14b36c14e82845ef359f9b2f1ba is 50, key is test_row_0/C:col10/1733239360783/Put/seqid=0 2024-12-03T15:22:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742383_1559 (size=12301) 2024-12-03T15:22:42,511 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4a7ad14b36c14e82845ef359f9b2f1ba 2024-12-03T15:22:42,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/f35f60ee5d7c48b59ec8c557e9b77a37 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37 2024-12-03T15:22:42,524 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37, entries=150, sequenceid=427, filesize=12.0 K 2024-12-03T15:22:42,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/b0446269412a4463987ddd6910aa1c94 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94 2024-12-03T15:22:42,531 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94, entries=150, sequenceid=427, filesize=12.0 K 2024-12-03T15:22:42,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/4a7ad14b36c14e82845ef359f9b2f1ba as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba 2024-12-03T15:22:42,547 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba, entries=150, sequenceid=427, filesize=12.0 K 2024-12-03T15:22:42,548 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 6c345cf4429e47e0b5ec5adba6afb04a in 868ms, sequenceid=427, compaction requested=false 2024-12-03T15:22:42,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:42,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
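The flush just logged moved ~87 KB out of a memstore that had been blocking writes at 512.0 K, which is consistent with a test configuration that deliberately shrinks the per-region flush size while keeping a blocking multiplier of 4. The sketch below shows how such limits could be set; the two property names are standard HBase configuration keys, but the 128 K flush size and the multiplier value are assumptions about this test, not values read from the log.

// Hedged sketch: shrinking memstore limits so the 512 K blocking threshold
// seen above is reached quickly. Values are assumed, not taken from the test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush each memstore once it reaches ~128 K of data (assumed value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Block new writes once the memstore exceeds multiplier * flush size,
        // i.e. 4 * 128 K = 512 K, matching the "Over memstore limit=512.0 K" lines.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}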
2024-12-03T15:22:42,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-03T15:22:42,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-03T15:22:42,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-03T15:22:42,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0210 sec 2024-12-03T15:22:42,551 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.0250 sec 2024-12-03T15:22:42,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:42,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:42,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/43ab8f99151f4a018dcc127beb4e6a8d is 50, key is test_row_0/A:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:42,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742384_1560 (size=14741) 2024-12-03T15:22:42,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239422585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239422588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239422588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-03T15:22:42,630 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-03T15:22:42,631 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:42,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-03T15:22:42,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:42,632 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:42,633 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:42,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:42,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239422690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239422693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239422693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:42,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:42,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:42,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:42,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:42,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239422898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239422899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239422899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:42,937 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:42,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:42,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:42,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/43ab8f99151f4a018dcc127beb4e6a8d 2024-12-03T15:22:42,994 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c3b1f164c77f40b5a2a87b83aaed270d is 50, key is test_row_0/B:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:42,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742385_1561 (size=12301) 2024-12-03T15:22:42,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c3b1f164c77f40b5a2a87b83aaed270d 2024-12-03T15:22:43,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/1b0593ae558846d4af353abfdd98d40d is 50, key is test_row_0/C:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:43,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742386_1562 (size=12301) 2024-12-03T15:22:43,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:43,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
as already flushing 2024-12-03T15:22:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60972 deadline: 1733239423204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32778 deadline: 1733239423204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32772 deadline: 1733239423205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:43,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:43,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:43,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,246 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:43,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:43,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:43,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/1b0593ae558846d4af353abfdd98d40d 2024-12-03T15:22:43,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/43ab8f99151f4a018dcc127beb4e6a8d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d 2024-12-03T15:22:43,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d, entries=200, sequenceid=451, filesize=14.4 K 2024-12-03T15:22:43,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c3b1f164c77f40b5a2a87b83aaed270d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d 2024-12-03T15:22:43,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d, entries=150, sequenceid=451, filesize=12.0 K 2024-12-03T15:22:43,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/1b0593ae558846d4af353abfdd98d40d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d 2024-12-03T15:22:43,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d, entries=150, sequenceid=451, filesize=12.0 K 2024-12-03T15:22:43,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 6c345cf4429e47e0b5ec5adba6afb04a in 864ms, sequenceid=451, compaction requested=true 2024-12-03T15:22:43,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6c345cf4429e47e0b5ec5adba6afb04a:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:43,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/B is initiating minor compaction (all files) 2024-12-03T15:22:43,427 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/A is initiating minor compaction (all files) 2024-12-03T15:22:43,428 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/B in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,428 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/A in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:43,428 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/177f6a3854a24f2e8fcfe46cb29a346e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=36.8 K 2024-12-03T15:22:43,428 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/e617896ef87e408b80d59be3e3349adb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=39.2 K 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 177f6a3854a24f2e8fcfe46cb29a346e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360134 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e617896ef87e408b80d59be3e3349adb, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360134 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting f35f60ee5d7c48b59ec8c557e9b77a37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733239360768 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b0446269412a4463987ddd6910aa1c94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733239360768 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43ab8f99151f4a018dcc127beb4e6a8d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239361925 2024-12-03T15:22:43,428 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c3b1f164c77f40b5a2a87b83aaed270d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239361925 2024-12-03T15:22:43,434 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#A#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:43,434 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/38b364a304c8417aaa14d939cd255f92 is 50, key is test_row_0/A:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:43,435 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#B#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:43,435 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/beb99c11c0264eff900f8b8b00df040c is 50, key is test_row_0/B:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:43,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742387_1563 (size=13221) 2024-12-03T15:22:43,460 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/38b364a304c8417aaa14d939cd255f92 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/38b364a304c8417aaa14d939cd255f92 2024-12-03T15:22:43,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742388_1564 (size=13221) 2024-12-03T15:22:43,467 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/A of 6c345cf4429e47e0b5ec5adba6afb04a into 38b364a304c8417aaa14d939cd255f92(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:43,467 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:43,467 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/A, priority=13, startTime=1733239363426; duration=0sec 2024-12-03T15:22:43,467 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:43,468 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:A 2024-12-03T15:22:43,468 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:43,470 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/beb99c11c0264eff900f8b8b00df040c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/beb99c11c0264eff900f8b8b00df040c 2024-12-03T15:22:43,470 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:43,470 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 6c345cf4429e47e0b5ec5adba6afb04a/C is initiating minor compaction (all files) 2024-12-03T15:22:43,470 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6c345cf4429e47e0b5ec5adba6afb04a/C in TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:43,470 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/ee7df67e857849b3a0d3157d5d69f376, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp, totalSize=36.8 K 2024-12-03T15:22:43,471 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7df67e857849b3a0d3157d5d69f376, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733239360134 2024-12-03T15:22:43,471 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a7ad14b36c14e82845ef359f9b2f1ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733239360768 2024-12-03T15:22:43,472 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b0593ae558846d4af353abfdd98d40d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1733239361925 2024-12-03T15:22:43,475 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/B of 6c345cf4429e47e0b5ec5adba6afb04a into beb99c11c0264eff900f8b8b00df040c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:43,475 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:43,475 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/B, priority=13, startTime=1733239363427; duration=0sec 2024-12-03T15:22:43,475 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:43,475 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:B 2024-12-03T15:22:43,478 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6c345cf4429e47e0b5ec5adba6afb04a#C#compaction#479 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:43,479 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/21f440ad0b254d0fa11a4ccbc980e7a6 is 50, key is test_row_0/C:col10/1733239362561/Put/seqid=0 2024-12-03T15:22:43,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742389_1565 (size=13221) 2024-12-03T15:22:43,509 DEBUG [Thread-2096 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:60989 2024-12-03T15:22:43,509 DEBUG [Thread-2096 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,511 DEBUG [Thread-2104 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d49886 to 127.0.0.1:60989 2024-12-03T15:22:43,511 DEBUG [Thread-2104 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,511 DEBUG [Thread-2102 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:60989 2024-12-03T15:22:43,511 DEBUG [Thread-2102 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,512 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/21f440ad0b254d0fa11a4ccbc980e7a6 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/21f440ad0b254d0fa11a4ccbc980e7a6 2024-12-03T15:22:43,513 DEBUG [Thread-2098 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:60989 2024-12-03T15:22:43,513 DEBUG [Thread-2098 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,513 DEBUG [Thread-2100 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:60989 2024-12-03T15:22:43,513 DEBUG [Thread-2100 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,517 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6c345cf4429e47e0b5ec5adba6afb04a/C of 6c345cf4429e47e0b5ec5adba6afb04a into 21f440ad0b254d0fa11a4ccbc980e7a6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:43,517 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:43,517 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a., storeName=6c345cf4429e47e0b5ec5adba6afb04a/C, priority=13, startTime=1733239363427; duration=0sec 2024-12-03T15:22:43,517 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:43,517 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6c345cf4429e47e0b5ec5adba6afb04a:C 2024-12-03T15:22:43,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:43,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:43,552 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:43,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:43,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/1d65ae46c418490eac190c8d0cfc23a3 is 50, key is test_row_0/A:col10/1733239362587/Put/seqid=0 2024-12-03T15:22:43,558 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742390_1566 (size=12301) 2024-12-03T15:22:43,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. as already flushing 2024-12-03T15:22:43,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:43,710 DEBUG [Thread-2085 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:60989 2024-12-03T15:22:43,710 DEBUG [Thread-2085 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,711 DEBUG [Thread-2093 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:60989 2024-12-03T15:22:43,711 DEBUG [Thread-2093 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,713 DEBUG [Thread-2089 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:60989 2024-12-03T15:22:43,713 DEBUG [Thread-2089 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:43,959 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/1d65ae46c418490eac190c8d0cfc23a3 2024-12-03T15:22:43,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c4dc0c4fa21f439ebca898dace231ca3 is 50, key is test_row_0/B:col10/1733239362587/Put/seqid=0 2024-12-03T15:22:43,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742391_1567 (size=12301) 2024-12-03T15:22:44,022 DEBUG [Thread-2091 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:60989 2024-12-03T15:22:44,022 DEBUG [Thread-2091 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:44,087 DEBUG [Thread-2087 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:60989 2024-12-03T15:22:44,087 DEBUG [Thread-2087 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:22:44,376 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c4dc0c4fa21f439ebca898dace231ca3 2024-12-03T15:22:44,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b11d823f9d974bcc8f2d13de99b970c0 is 50, key is test_row_0/C:col10/1733239362587/Put/seqid=0 2024-12-03T15:22:44,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742392_1568 (size=12301) 2024-12-03T15:22:44,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:44,784 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b11d823f9d974bcc8f2d13de99b970c0 2024-12-03T15:22:44,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/1d65ae46c418490eac190c8d0cfc23a3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1d65ae46c418490eac190c8d0cfc23a3 2024-12-03T15:22:44,791 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1d65ae46c418490eac190c8d0cfc23a3, entries=150, sequenceid=469, filesize=12.0 K 2024-12-03T15:22:44,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/c4dc0c4fa21f439ebca898dace231ca3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c4dc0c4fa21f439ebca898dace231ca3 2024-12-03T15:22:44,794 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c4dc0c4fa21f439ebca898dace231ca3, entries=150, sequenceid=469, filesize=12.0 K 2024-12-03T15:22:44,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/b11d823f9d974bcc8f2d13de99b970c0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b11d823f9d974bcc8f2d13de99b970c0 2024-12-03T15:22:44,797 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b11d823f9d974bcc8f2d13de99b970c0, entries=150, sequenceid=469, filesize=12.0 K 2024-12-03T15:22:44,798 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=33.54 KB/34350 for 6c345cf4429e47e0b5ec5adba6afb04a in 1245ms, sequenceid=469, compaction requested=false 2024-12-03T15:22:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:44,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-03T15:22:44,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-03T15:22:44,799 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-03T15:22:44,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1660 sec 2024-12-03T15:22:44,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.1690 sec 2024-12-03T15:22:46,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-03T15:22:46,738 INFO [Thread-2095 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1956
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5868 rows
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1948
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5844 rows
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1951
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5853 rows
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1944
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5832 rows
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1948
2024-12-03T15:22:46,739 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5844 rows
2024-12-03T15:22:46,739 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T15:22:46,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:60989
2024-12-03T15:22:46,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:22:46,741 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-03T15:22:46,742 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-03T15:22:46,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-03T15:22:46,752 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239366752"}]},"ts":"1733239366752"}
2024-12-03T15:22:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136
2024-12-03T15:22:46,754 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-03T15:22:46,757 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-03T15:22:46,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-03T15:22:46,759 INFO [PEWorker-1 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, UNASSIGN}] 2024-12-03T15:22:46,760 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=137, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, UNASSIGN 2024-12-03T15:22:46,760 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=6c345cf4429e47e0b5ec5adba6afb04a, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:46,761 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:22:46,761 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; CloseRegionProcedure 6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-03T15:22:46,912 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:46,913 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(124): Close 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:46,913 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:22:46,913 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1681): Closing 6c345cf4429e47e0b5ec5adba6afb04a, disabling compactions & flushes 2024-12-03T15:22:46,913 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:46,913 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:46,913 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. after waiting 0 ms 2024-12-03T15:22:46,913 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 
2024-12-03T15:22:46,913 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(2837): Flushing 6c345cf4429e47e0b5ec5adba6afb04a 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=A 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=B 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6c345cf4429e47e0b5ec5adba6afb04a, store=C 2024-12-03T15:22:46,914 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:46,917 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/68394ebf25f54b9d89254ec608653384 is 50, key is test_row_0/A:col10/1733239364086/Put/seqid=0 2024-12-03T15:22:46,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742393_1569 (size=9857) 2024-12-03T15:22:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-03T15:22:47,321 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/68394ebf25f54b9d89254ec608653384 2024-12-03T15:22:47,327 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e637f28746574335a54622e6cfd20c23 is 50, key is test_row_0/B:col10/1733239364086/Put/seqid=0 2024-12-03T15:22:47,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742394_1570 (size=9857) 2024-12-03T15:22:47,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-03T15:22:47,731 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 
{event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e637f28746574335a54622e6cfd20c23 2024-12-03T15:22:47,738 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6daaef83ab5f40f3a17f784c09a222eb is 50, key is test_row_0/C:col10/1733239364086/Put/seqid=0 2024-12-03T15:22:47,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742395_1571 (size=9857) 2024-12-03T15:22:47,742 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=477 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6daaef83ab5f40f3a17f784c09a222eb 2024-12-03T15:22:47,746 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/A/68394ebf25f54b9d89254ec608653384 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/68394ebf25f54b9d89254ec608653384 2024-12-03T15:22:47,750 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/68394ebf25f54b9d89254ec608653384, entries=100, sequenceid=477, filesize=9.6 K 2024-12-03T15:22:47,751 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/B/e637f28746574335a54622e6cfd20c23 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e637f28746574335a54622e6cfd20c23 2024-12-03T15:22:47,754 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e637f28746574335a54622e6cfd20c23, entries=100, sequenceid=477, filesize=9.6 K 2024-12-03T15:22:47,754 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/.tmp/C/6daaef83ab5f40f3a17f784c09a222eb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6daaef83ab5f40f3a17f784c09a222eb 2024-12-03T15:22:47,757 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6daaef83ab5f40f3a17f784c09a222eb, entries=100, sequenceid=477, filesize=9.6 K 2024-12-03T15:22:47,757 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 6c345cf4429e47e0b5ec5adba6afb04a in 844ms, sequenceid=477, compaction requested=true 2024-12-03T15:22:47,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2eaf94cd72e04880ae63c49618f6184c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1e4f059743bd47b686d69925436b58f2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/c186b019a35c465382e0966f1aa2059d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ad9bc6a57bad4e469056112eaca888c8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/0f3e7c6f65334b73bd5f71228bb18c54, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/fd002742156c46ad951dc3227981e9a1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c1b719a86e54b03a5f7c8b7385bc47b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/e617896ef87e408b80d59be3e3349adb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d] to archive 2024-12-03T15:22:47,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:47,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2eaf94cd72e04880ae63c49618f6184c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2eaf94cd72e04880ae63c49618f6184c 2024-12-03T15:22:47,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/81880de33eff4a868b9a9b8482d0f0b0 2024-12-03T15:22:47,762 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3472c3fe684346edbfc479862d42746b 2024-12-03T15:22:47,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/370cfa2378e14e2284b4fa8a25206c19 2024-12-03T15:22:47,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1e4f059743bd47b686d69925436b58f2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1e4f059743bd47b686d69925436b58f2 2024-12-03T15:22:47,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/d3397f3b0fd54ccb9c929da8706c0309 2024-12-03T15:22:47,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/afb664162bca463cbe766f8396c1fd9c 2024-12-03T15:22:47,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/c186b019a35c465382e0966f1aa2059d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/c186b019a35c465382e0966f1aa2059d 2024-12-03T15:22:47,768 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ea4c5e470c354544bed404b68825bda5 2024-12-03T15:22:47,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ad9bc6a57bad4e469056112eaca888c8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/ad9bc6a57bad4e469056112eaca888c8 2024-12-03T15:22:47,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3e1befccc5454b41bf2ccae50c175c3c 2024-12-03T15:22:47,771 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/3df4ea412a7a4e6ea997a215230656f2 2024-12-03T15:22:47,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/35840e7ff84f45678984804bbd563392 2024-12-03T15:22:47,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/0f3e7c6f65334b73bd5f71228bb18c54 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/0f3e7c6f65334b73bd5f71228bb18c54 2024-12-03T15:22:47,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/39ba66fb8a0c48b99a48fdfd3d523304 2024-12-03T15:22:47,774 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6741fdd585e345ce9977930bc6f13bca 2024-12-03T15:22:47,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/14fe10421c074594bad9bde413514e3d 2024-12-03T15:22:47,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/fd002742156c46ad951dc3227981e9a1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/fd002742156c46ad951dc3227981e9a1 2024-12-03T15:22:47,777 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/2632635d010e4648ac4e6a2bb2593889 2024-12-03T15:22:47,778 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/786114cdf9b34076a4d12cc1e9fd6b92 2024-12-03T15:22:47,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/b68d0113e9a74091bb90b67bb94a77aa 2024-12-03T15:22:47,779 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c1b719a86e54b03a5f7c8b7385bc47b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/7c1b719a86e54b03a5f7c8b7385bc47b 2024-12-03T15:22:47,780 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/28dae1e8074a46bc9f89e10bbb8ed664 2024-12-03T15:22:47,781 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/6a5d3aa432e74f63a9341298e437053f 2024-12-03T15:22:47,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/e617896ef87e408b80d59be3e3349adb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/e617896ef87e408b80d59be3e3349adb 2024-12-03T15:22:47,782 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/f35f60ee5d7c48b59ec8c557e9b77a37 2024-12-03T15:22:47,783 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/43ab8f99151f4a018dcc127beb4e6a8d 2024-12-03T15:22:47,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/fe7e544c9eaa4c92ab8b31346a0cd373, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/cb5e833cdd21430ca49c2cb1b020b24f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e25c1f3e189d4a7091df89aacacef7d6, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/96896503477a4a0bbf59dc880c6c9d3f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/2588173504844b27b17d2b30988d8b25, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9723ab10b1dd4b569852e64fd9dd7550, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e746e9be171c4427b71ce52c7965de9e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/177f6a3854a24f2e8fcfe46cb29a346e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d] to archive 2024-12-03T15:22:47,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:47,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/fe7e544c9eaa4c92ab8b31346a0cd373 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/fe7e544c9eaa4c92ab8b31346a0cd373 2024-12-03T15:22:47,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/1f6dfe7771c643d590e30c525b006603 2024-12-03T15:22:47,787 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cb4e5fccd794bef8d3afafa795c32d8 2024-12-03T15:22:47,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/cb5e833cdd21430ca49c2cb1b020b24f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/cb5e833cdd21430ca49c2cb1b020b24f 2024-12-03T15:22:47,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/58d42746f8f44d1c86e608d4572cb78d 2024-12-03T15:22:47,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/71c0a82ea7c04eb9937ef50748087626 2024-12-03T15:22:47,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e25c1f3e189d4a7091df89aacacef7d6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e25c1f3e189d4a7091df89aacacef7d6 2024-12-03T15:22:47,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/6e71a8f6b086461fb4910a485f9fb85d 2024-12-03T15:22:47,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8bac8f5861e948d48a4933aab839f58a 2024-12-03T15:22:47,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/96896503477a4a0bbf59dc880c6c9d3f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/96896503477a4a0bbf59dc880c6c9d3f 2024-12-03T15:22:47,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/52cca6002a0540ada528ee42ebbbd34a 2024-12-03T15:22:47,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/896844d8278849f39d3d98085a3727f0 2024-12-03T15:22:47,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/2588173504844b27b17d2b30988d8b25 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/2588173504844b27b17d2b30988d8b25 2024-12-03T15:22:47,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/53838be22efd45a98243fee452451f4a 2024-12-03T15:22:47,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/88d31e482f0047c9b99edb45a1b7aae1 2024-12-03T15:22:47,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9b365b8b56664d0eb524dd8f2c34688f 2024-12-03T15:22:47,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9723ab10b1dd4b569852e64fd9dd7550 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/9723ab10b1dd4b569852e64fd9dd7550 2024-12-03T15:22:47,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/061332ee317e4a24abfaaf3e6c468893 2024-12-03T15:22:47,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/8cba4e811fb34abd84517cc3e863144f 2024-12-03T15:22:47,800 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/11357f67eed24ef1851c71464a611c64 2024-12-03T15:22:47,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e746e9be171c4427b71ce52c7965de9e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e746e9be171c4427b71ce52c7965de9e 2024-12-03T15:22:47,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/156fcc85a15747db9d3f47776e5b2b38 2024-12-03T15:22:47,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/ecdcd15edaa649858fb6cecaf7cc6f74 2024-12-03T15:22:47,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/177f6a3854a24f2e8fcfe46cb29a346e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/177f6a3854a24f2e8fcfe46cb29a346e 2024-12-03T15:22:47,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/72936b7bc48b4e7d8add9de37eca009c 2024-12-03T15:22:47,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/b0446269412a4463987ddd6910aa1c94 2024-12-03T15:22:47,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c3b1f164c77f40b5a2a87b83aaed270d 2024-12-03T15:22:47,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/5992d1650e4b495a955eb39caf751d28, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/946b4379ae474add81e220b664f4de70, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/0e8b28b5f1e045b785ef096257a5e9d4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/c6ad45b4d4a94f62a567601ee5e66fd6, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f951de277d6a4f8fb1c7313ab0c58840, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/034ed1a61d984b81a4cbd1db5ad311f1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/ee7df67e857849b3a0d3157d5d69f376, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d] to archive 2024-12-03T15:22:47,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-03T15:22:47,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5 2024-12-03T15:22:47,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e5b578e3fc954e54ad2bdf270455851b 2024-12-03T15:22:47,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/dd1b5ce89e83467981aec547c2f7c706 2024-12-03T15:22:47,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/5992d1650e4b495a955eb39caf751d28 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/5992d1650e4b495a955eb39caf751d28 2024-12-03T15:22:47,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/300b26b2995d4a428c2223488829385b 2024-12-03T15:22:47,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/2ad28278ffba4857a842160fbb2c212f 2024-12-03T15:22:47,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/946b4379ae474add81e220b664f4de70 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/946b4379ae474add81e220b664f4de70 2024-12-03T15:22:47,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/e4408a45ffee4efb9111b0209b7d842f 2024-12-03T15:22:47,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/3d663543847a45ee8062df7f0eba326d 2024-12-03T15:22:47,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/0e8b28b5f1e045b785ef096257a5e9d4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/0e8b28b5f1e045b785ef096257a5e9d4 2024-12-03T15:22:47,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/123b9f93249e42f2ba1748f16407ccc5 2024-12-03T15:22:47,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/19be25d22a124b7491e30a091d17a239 2024-12-03T15:22:47,818 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/c6ad45b4d4a94f62a567601ee5e66fd6 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/c6ad45b4d4a94f62a567601ee5e66fd6 2024-12-03T15:22:47,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b829e8aa022a4538b13f77516188aec1 2024-12-03T15:22:47,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/14a988eb774b4a30aed5fbb11051cd61 2024-12-03T15:22:47,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/13d13d86628e4ca19156bf6e752caec7 2024-12-03T15:22:47,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f951de277d6a4f8fb1c7313ab0c58840 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f951de277d6a4f8fb1c7313ab0c58840 2024-12-03T15:22:47,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b058876b00ca4e5fb030af26d2a64ae0 2024-12-03T15:22:47,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/184398c37ed14e52add8d423ba976e49 2024-12-03T15:22:47,824 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4ab3f4a546974257bcb1c8425de3041e 2024-12-03T15:22:47,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/034ed1a61d984b81a4cbd1db5ad311f1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/034ed1a61d984b81a4cbd1db5ad311f1 2024-12-03T15:22:47,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6c7c2c59a93e486c86716c9aa394de9a 2024-12-03T15:22:47,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/56c5dd8d0a604d8c8473545c950f3ce4 2024-12-03T15:22:47,828 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/ee7df67e857849b3a0d3157d5d69f376 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/ee7df67e857849b3a0d3157d5d69f376 2024-12-03T15:22:47,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/f9df8eb10bb644c9875cd4012e972fef 2024-12-03T15:22:47,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/4a7ad14b36c14e82845ef359f9b2f1ba 2024-12-03T15:22:47,830 DEBUG [StoreCloser-TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/1b0593ae558846d4af353abfdd98d40d 2024-12-03T15:22:47,834 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/recovered.edits/480.seqid, newMaxSeqId=480, maxSeqId=1 2024-12-03T15:22:47,835 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a. 2024-12-03T15:22:47,835 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] regionserver.HRegion(1635): Region close journal for 6c345cf4429e47e0b5ec5adba6afb04a: 2024-12-03T15:22:47,836 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=139}] handler.UnassignRegionHandler(170): Closed 6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:47,836 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=6c345cf4429e47e0b5ec5adba6afb04a, regionState=CLOSED 2024-12-03T15:22:47,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-03T15:22:47,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; CloseRegionProcedure 6c345cf4429e47e0b5ec5adba6afb04a, server=2b5ef621a0dd,46815,1733239226292 in 1.0760 sec 2024-12-03T15:22:47,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=137 2024-12-03T15:22:47,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=137, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6c345cf4429e47e0b5ec5adba6afb04a, UNASSIGN in 1.0790 sec 2024-12-03T15:22:47,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-03T15:22:47,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0810 sec 2024-12-03T15:22:47,841 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239367841"}]},"ts":"1733239367841"} 2024-12-03T15:22:47,842 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 
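Note on the StoreCloser archiving entries above: each compacted HFile is moved from the region's data directory to a mirrored location under the cluster's archive root, i.e. .../data/default/<table>/<region>/<cf>/<file> becomes .../archive/data/default/<table>/<region>/<cf>/<file>. The sketch below only illustrates that path mapping as it appears in the log; the class and method names are made up and this is not HBase's internal HFileArchiver code.

    import org.apache.hadoop.fs.Path;

    // Illustrative only: mirrors a store file path under the archive root,
    // matching the data/... -> archive/data/... moves logged above.
    public final class ArchivePathSketch {
      static Path archivePathFor(Path rootDir, Path storeFile) {
        // Relative part: data/default/<table>/<region>/<cf>/<file>
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411");
        Path file = new Path(root,
            "data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/37af28ae854a4b58a3c3304ffd10b3c5");
        // Prints the archive-side twin of the store file, as in the DEBUG lines above.
        System.out.println(archivePathFor(root, file));
      }
    }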
2024-12-03T15:22:47,844 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:22:47,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1030 sec 2024-12-03T15:22:47,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-03T15:22:47,856 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-03T15:22:47,857 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:22:47,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,858 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=140, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,859 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=140, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-03T15:22:47,861 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:47,863 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/recovered.edits] 2024-12-03T15:22:47,865 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1d65ae46c418490eac190c8d0cfc23a3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/1d65ae46c418490eac190c8d0cfc23a3 2024-12-03T15:22:47,866 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/38b364a304c8417aaa14d939cd255f92 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/38b364a304c8417aaa14d939cd255f92 2024-12-03T15:22:47,867 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/68394ebf25f54b9d89254ec608653384 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/A/68394ebf25f54b9d89254ec608653384 2024-12-03T15:22:47,869 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/beb99c11c0264eff900f8b8b00df040c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/beb99c11c0264eff900f8b8b00df040c 2024-12-03T15:22:47,870 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c4dc0c4fa21f439ebca898dace231ca3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/c4dc0c4fa21f439ebca898dace231ca3 2024-12-03T15:22:47,871 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e637f28746574335a54622e6cfd20c23 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/B/e637f28746574335a54622e6cfd20c23 2024-12-03T15:22:47,873 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/21f440ad0b254d0fa11a4ccbc980e7a6 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/21f440ad0b254d0fa11a4ccbc980e7a6 2024-12-03T15:22:47,874 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6daaef83ab5f40f3a17f784c09a222eb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/6daaef83ab5f40f3a17f784c09a222eb 2024-12-03T15:22:47,882 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b11d823f9d974bcc8f2d13de99b970c0 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/C/b11d823f9d974bcc8f2d13de99b970c0 2024-12-03T15:22:47,886 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/recovered.edits/480.seqid to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a/recovered.edits/480.seqid 2024-12-03T15:22:47,886 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/6c345cf4429e47e0b5ec5adba6afb04a 2024-12-03T15:22:47,887 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:22:47,889 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=140, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,890 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:22:47,892 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:22:47,893 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=140, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,893 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-03T15:22:47,893 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239367893"}]},"ts":"9223372036854775807"} 2024-12-03T15:22:47,894 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:22:47,895 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6c345cf4429e47e0b5ec5adba6afb04a, NAME => 'TestAcidGuarantees,,1733239342178.6c345cf4429e47e0b5ec5adba6afb04a.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:22:47,895 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-03T15:22:47,895 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239367895"}]},"ts":"9223372036854775807"} 2024-12-03T15:22:47,903 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:22:47,905 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=140, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 48 msec 2024-12-03T15:22:47,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-03T15:22:47,960 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-03T15:22:47,979 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241 (was 241), OpenFileDescriptor=457 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=853 (was 894), ProcessCount=11 (was 11), AvailableMemoryMB=1740 (was 1004) - AvailableMemoryMB LEAK? - 2024-12-03T15:22:47,989 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=853, ProcessCount=11, AvailableMemoryMB=1739 2024-12-03T15:22:47,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
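The DISABLE (procId 136) and DELETE (procId 140) operations reported as completed above correspond to ordinary HBase Admin calls issued by the test client. A minimal client-side sketch is shown below, assuming default client configuration; it is not the test's own code, only the standard Admin API sequence that produces this kind of master-side procedure activity.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            admin.disableTable(tn); // master runs DisableTableProcedure ("Operation: DISABLE ... completed")
            admin.deleteTable(tn);  // master runs DeleteTableProcedure and archives the region directories
          }
        }
      }
    }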
2024-12-03T15:22:47,991 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:22:47,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:47,993 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-03T15:22:47,993 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:47,993 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 141 2024-12-03T15:22:47,994 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-03T15:22:47,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-03T15:22:48,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742396_1572 (size=963) 2024-12-03T15:22:48,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-03T15:22:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-03T15:22:48,404 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411 2024-12-03T15:22:48,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742397_1573 (size=53) 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 365d5514a5bb1b4ce69a418d0f3e9934, disabling compactions & flushes 2024-12-03T15:22:48,410 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. after waiting 0 ms 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,410 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,410 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:48,411 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-03T15:22:48,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733239368411"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733239368411"}]},"ts":"1733239368411"} 2024-12-03T15:22:48,412 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-03T15:22:48,413 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-03T15:22:48,413 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239368413"}]},"ts":"1733239368413"} 2024-12-03T15:22:48,414 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-03T15:22:48,418 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, ASSIGN}] 2024-12-03T15:22:48,420 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, ASSIGN 2024-12-03T15:22:48,420 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, ASSIGN; state=OFFLINE, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=false 2024-12-03T15:22:48,571 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:48,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; OpenRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-03T15:22:48,723 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:48,726 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:48,726 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7285): Opening region: {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:22:48,727 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,727 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:48,727 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7327): checking encryption for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,727 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(7330): checking classloading for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,728 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,729 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:48,729 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName A 2024-12-03T15:22:48,729 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:48,730 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:48,730 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,731 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:48,731 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName B 2024-12-03T15:22:48,731 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:48,732 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:48,732 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,732 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:48,732 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName C 2024-12-03T15:22:48,733 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:48,733 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:48,733 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,734 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,734 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,735 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:22:48,736 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1085): writing seq id for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:48,738 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-03T15:22:48,738 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1102): Opened 365d5514a5bb1b4ce69a418d0f3e9934; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67332755, jitterRate=0.0033362358808517456}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:22:48,739 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegion(1001): Region open journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:48,739 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., pid=143, masterSystemTime=1733239368723 2024-12-03T15:22:48,741 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:48,741 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=143}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
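For reference, the descriptor printed in the create request above (ADAPTIVE compacting memstore, families A/B/C with a single version, and the 128 KB memstore flush size that TableDescriptorChecker warns about) can be built with the standard 2.x client API roughly as follows. This is a hedged sketch of equivalent client code, not the TestAcidGuarantees test itself; the class and variable names are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Matches the TABLE_ATTRIBUTES => METADATA entry in the create request above.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                  // 131072 bytes: the flush size the TableDescriptorChecker flags as very small.
                  .setMemStoreFlushSize(131072L);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1) // VERSIONS => '1'
                    .build());
          }
          admin.createTable(table.build());
        }
      }
    }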
2024-12-03T15:22:48,741 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=OPEN, openSeqNum=2, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:48,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-03T15:22:48,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; OpenRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 in 170 msec 2024-12-03T15:22:48,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-03T15:22:48,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, ASSIGN in 325 msec 2024-12-03T15:22:48,745 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-03T15:22:48,745 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239368745"}]},"ts":"1733239368745"} 2024-12-03T15:22:48,746 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-03T15:22:48,749 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=141, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-03T15:22:48,749 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 758 msec 2024-12-03T15:22:49,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-03T15:22:49,098 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 141 completed 2024-12-03T15:22:49,100 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x537a66f8 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2ac53e79 2024-12-03T15:22:49,106 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d5efb7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:49,107 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:49,109 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51564, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:49,110 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-03T15:22:49,114 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42686, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-03T15:22:49,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-03T15:22:49,116 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-03T15:22:49,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-03T15:22:49,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742398_1574 (size=999) 2024-12-03T15:22:49,539 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-03T15:22:49,539 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-03T15:22:49,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-03T15:22:49,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, REOPEN/MOVE}] 2024-12-03T15:22:49,543 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, REOPEN/MOVE 2024-12-03T15:22:49,543 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:49,544 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:22:49,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:49,695 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:49,696 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:49,696 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:22:49,696 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 365d5514a5bb1b4ce69a418d0f3e9934, disabling compactions & flushes 2024-12-03T15:22:49,696 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:49,696 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:49,696 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. after waiting 0 ms 2024-12-03T15:22:49,696 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:49,699 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-03T15:22:49,699 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:49,699 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:49,700 WARN [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionServer(3786): Not adding moved region record: 365d5514a5bb1b4ce69a418d0f3e9934 to self. 2024-12-03T15:22:49,701 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:49,701 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=CLOSED 2024-12-03T15:22:49,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-03T15:22:49,703 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 in 158 msec 2024-12-03T15:22:49,703 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, REOPEN/MOVE; state=CLOSED, location=2b5ef621a0dd,46815,1733239226292; forceNewPlan=false, retain=true 2024-12-03T15:22:49,854 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=OPENING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:49,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=146, state=RUNNABLE; OpenRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:22:50,006 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,009 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
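Editor's note: once the close/reopen subprocedures above finish, the updated descriptor is live on the region server. A short, hedged fragment for checking that from the client side (it reuses the admin handle from the previous sketch; the expected values are simply those recorded in the descriptor update):

// Verify the reopened table now carries the MOB attributes logged in the descriptor update.
TableDescriptor td = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"));
boolean mobEnabled = td.getColumnFamily(Bytes.toBytes("A")).isMobEnabled();
long mobThreshold = td.getColumnFamily(Bytes.toBytes("A")).getMobThreshold();
// Expect mobEnabled == true and mobThreshold == 4 once pid=144 has completed.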
2024-12-03T15:22:50,009 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7285): Opening region: {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} 2024-12-03T15:22:50,009 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,009 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-03T15:22:50,009 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7327): checking encryption for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,009 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(7330): checking classloading for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,010 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,011 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:50,011 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName A 2024-12-03T15:22:50,012 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:50,012 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:50,013 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,013 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:50,013 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName B 2024-12-03T15:22:50,013 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:50,014 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:50,014 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,014 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-03T15:22:50,014 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 365d5514a5bb1b4ce69a418d0f3e9934 columnFamilyName C 2024-12-03T15:22:50,014 DEBUG [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:50,015 INFO [StoreOpener-365d5514a5bb1b4ce69a418d0f3e9934-1 {}] regionserver.HStore(327): Store=365d5514a5bb1b4ce69a418d0f3e9934/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-03T15:22:50,015 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,015 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,016 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,017 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-03T15:22:50,018 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1085): writing seq id for 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,019 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1102): Opened 365d5514a5bb1b4ce69a418d0f3e9934; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62218463, jitterRate=-0.07287265360355377}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-03T15:22:50,019 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegion(1001): Region open journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:50,020 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., pid=148, masterSystemTime=1733239370006 2024-12-03T15:22:50,021 DEBUG [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,021 INFO [RS_OPEN_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_OPEN_REGION, pid=148}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
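Editor's note: from this point the excerpt shows the test opening a batch of ZooKeeper/RPC client connections, requesting a flush of TestAcidGuarantees (FlushTableProcedure, pid=149), and then rejecting concurrent puts with RegionTooBusyException once the 512.0 K memstore limit is exceeded. A deliberately simplified sketch of that client-side pattern follows; only the table name, row/column names, and the exception type come from the log. In a real client the built-in retry machinery (hbase.client.retries.number, hbase.client.pause) normally absorbs RegionTooBusyException before application code sees it, so the explicit retry loop here is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndRetryPut {                          // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(name)) {
      // Ask the master to flush the table (drives the FlushTableProcedure seen as pid=149 below).
      admin.flush(name);

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // "Over memstore limit" is a retriable condition: back off and try again a bounded number of times.
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) throw e;          // illustrative cap, not an HBase default
          Thread.sleep(100L * (attempt + 1));  // simple linear backoff for the sketch
        }
      }
    }
  }
}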
2024-12-03T15:22:50,022 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=OPEN, openSeqNum=5, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=146 2024-12-03T15:22:50,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=146, state=SUCCESS; OpenRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 in 167 msec 2024-12-03T15:22:50,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-03T15:22:50,024 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, REOPEN/MOVE in 481 msec 2024-12-03T15:22:50,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-03T15:22:50,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-12-03T15:22:50,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 910 msec 2024-12-03T15:22:50,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-03T15:22:50,029 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06094c70 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc9c3e 2024-12-03T15:22:50,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc332d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,033 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x103dfc6e to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7181df3b 2024-12-03T15:22:50,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17327621, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,039 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-12-03T15:22:50,042 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,043 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 
to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-12-03T15:22:50,046 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,047 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-12-03T15:22:50,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,050 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7846cb78 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@150e08ed 2024-12-03T15:22:50,053 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53305d9b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,054 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f1754bc to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a3b66d3 2024-12-03T15:22:50,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb6288a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,058 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d9113f3 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5cfdf76c 2024-12-03T15:22:50,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6556601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,061 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-12-03T15:22:50,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,065 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:60989 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-12-03T15:22:50,068 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-03T15:22:50,070 DEBUG [hconnection-0x7f457114-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:50,071 DEBUG [hconnection-0x12b5ffb2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,072 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,072 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees 2024-12-03T15:22:50,072 DEBUG [hconnection-0x13e88f99-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,073 DEBUG [hconnection-0x45ba86d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,073 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:50,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-03T15:22:50,073 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,074 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,074 DEBUG [hconnection-0x26d3cf66-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,074 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:50,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:50,074 DEBUG [hconnection-0x1b7dda7a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,075 DEBUG [hconnection-0x74581f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,075 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51598, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,076 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,076 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,076 DEBUG [hconnection-0x43760125-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,076 DEBUG [hconnection-0x4f849a73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,079 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,079 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,082 DEBUG [hconnection-0x1724569a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-03T15:22:50,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,083 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-03T15:22:50,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:50,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239430106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239430107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239430108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239430108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239430111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203bc07c3a494e044048e07dec585f64dad_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239370081/Put/seqid=0 2024-12-03T15:22:50,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742399_1575 (size=12154) 2024-12-03T15:22:50,132 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:50,138 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203bc07c3a494e044048e07dec585f64dad_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bc07c3a494e044048e07dec585f64dad_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,139 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/38d631c324374dedaeeb124def735ba2, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:50,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/38d631c324374dedaeeb124def735ba2 is 175, key is test_row_0/A:col10/1733239370081/Put/seqid=0 2024-12-03T15:22:50,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742400_1576 (size=30955) 2024-12-03T15:22:50,166 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/38d631c324374dedaeeb124def735ba2 2024-12-03T15:22:50,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-03T15:22:50,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cdfc52f310bc410aa7f59f806eec3f1f is 50, key is test_row_0/B:col10/1733239370081/Put/seqid=0 2024-12-03T15:22:50,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742401_1577 (size=12001) 2024-12-03T15:22:50,209 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cdfc52f310bc410aa7f59f806eec3f1f 2024-12-03T15:22:50,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239430214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239430214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239430214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239430214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239430214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-03T15:22:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/972d3dbfbda14b7bbccedb391f0c527e is 50, key is test_row_0/C:col10/1733239370081/Put/seqid=0 2024-12-03T15:22:50,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742402_1578 (size=12001) 2024-12-03T15:22:50,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-03T15:22:50,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-03T15:22:50,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239430416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239430417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239430417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239430417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239430418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,532 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-03T15:22:50,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:50,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:50,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,661 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/972d3dbfbda14b7bbccedb391f0c527e 2024-12-03T15:22:50,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-03T15:22:50,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-03T15:22:50,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:50,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:50,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:50,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/38d631c324374dedaeeb124def735ba2 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2 2024-12-03T15:22:50,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239430720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239430721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239430722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239430723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:50,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239430730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2, entries=150, sequenceid=17, filesize=30.2 K 2024-12-03T15:22:50,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cdfc52f310bc410aa7f59f806eec3f1f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f 2024-12-03T15:22:50,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f, entries=150, sequenceid=17, filesize=11.7 K 2024-12-03T15:22:50,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/972d3dbfbda14b7bbccedb391f0c527e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e 2024-12-03T15:22:50,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e, entries=150, sequenceid=17, filesize=11.7 K 2024-12-03T15:22:50,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 365d5514a5bb1b4ce69a418d0f3e9934 in 663ms, sequenceid=17, compaction requested=false 2024-12-03T15:22:50,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:50,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:50,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:50,853 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:50,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:50,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120397264fa80af747d3bec3613bfba6aef5_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239370105/Put/seqid=0 2024-12-03T15:22:50,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742403_1579 (size=12154) 2024-12-03T15:22:50,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:50,962 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120397264fa80af747d3bec3613bfba6aef5_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397264fa80af747d3bec3613bfba6aef5_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:50,969 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/60364f25e1054e73986ee10f0ad6f5d9, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:50,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/60364f25e1054e73986ee10f0ad6f5d9 is 175, key is test_row_0/A:col10/1733239370105/Put/seqid=0 2024-12-03T15:22:51,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742404_1580 (size=30955) 2024-12-03T15:22:51,018 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/60364f25e1054e73986ee10f0ad6f5d9 2024-12-03T15:22:51,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 is 50, key is test_row_0/B:col10/1733239370105/Put/seqid=0 2024-12-03T15:22:51,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742405_1581 (size=12001) 2024-12-03T15:22:51,070 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 2024-12-03T15:22:51,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 is 50, key is test_row_0/C:col10/1733239370105/Put/seqid=0 2024-12-03T15:22:51,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742406_1582 (size=12001) 2024-12-03T15:22:51,108 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 2024-12-03T15:22:51,112 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/60364f25e1054e73986ee10f0ad6f5d9 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9 2024-12-03T15:22:51,121 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9, entries=150, sequenceid=41, filesize=30.2 K 2024-12-03T15:22:51,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 2024-12-03T15:22:51,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,126 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39, entries=150, sequenceid=41, filesize=11.7 K 2024-12-03T15:22:51,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 2024-12-03T15:22:51,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,131 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665, entries=150, sequenceid=41, filesize=11.7 K 2024-12-03T15:22:51,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:22:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:22:51,138 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 365d5514a5bb1b4ce69a418d0f3e9934 in 285ms, sequenceid=41, compaction requested=false
2024-12-03T15:22:51,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:22:51,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:51,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150
2024-12-03T15:22:51,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=150
2024-12-03T15:22:51,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149
2024-12-03T15:22:51,141 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0650 sec
2024-12-03T15:22:51,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 1.0710 sec
2024-12-03T15:22:51,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149
2024-12-03T15:22:51,176 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed
2024-12-03T15:22:51,177 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-03T15:22:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees
2024-12-03T15:22:51,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151
2024-12-03T15:22:51,179 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-03T15:22:51,180 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-03T15:22:51,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-03T15:22:51,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:51,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:51,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:51,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:51,263 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:51,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:51,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:51,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:51,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b16b1030d4174524ad8317391f832ea4_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:51,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-03T15:22:51,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:22:51,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742408_1584 (size=24358) 2024-12-03T15:22:51,332 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:51,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:51,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:51,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:51,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239431353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239431357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239431357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239431360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239431360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239431462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239431462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239431465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239431465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239431467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-03T15:22:51,487 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:51,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:51,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:51,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:51,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239431664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239431666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239431668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239431669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239431671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,726 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:51,733 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203b16b1030d4174524ad8317391f832ea4_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203b16b1030d4174524ad8317391f832ea4_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:51,734 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/503b4cff08e243288dd7332d78bbbc2d, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:51,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/503b4cff08e243288dd7332d78bbbc2d is 175, key is test_row_0/A:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:51,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742407_1583 (size=73994) 2024-12-03T15:22:51,737 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 
K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/503b4cff08e243288dd7332d78bbbc2d 2024-12-03T15:22:51,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6d82bcf54ad54dd99cd93dde6b45d559 is 50, key is test_row_0/B:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:51,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742409_1585 (size=12001) 2024-12-03T15:22:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-03T15:22:51,802 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:51,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:51,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:51,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239431969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239431970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:51,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:51,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:51,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:51,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:51,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239431978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239431978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:51,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239431981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:52,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:52,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:52,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6d82bcf54ad54dd99cd93dde6b45d559 2024-12-03T15:22:52,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f40d261b8d74481aba2b73c65a34b332 is 50, key is test_row_0/C:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:52,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742410_1586 (size=12001) 2024-12-03T15:22:52,284 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:52,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-03T15:22:52,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:52,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239432477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239432478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239432484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239432484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239432486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f40d261b8d74481aba2b73c65a34b332 2024-12-03T15:22:52,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/503b4cff08e243288dd7332d78bbbc2d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d 2024-12-03T15:22:52,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d, entries=400, sequenceid=52, filesize=72.3 K 2024-12-03T15:22:52,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6d82bcf54ad54dd99cd93dde6b45d559 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559 2024-12-03T15:22:52,577 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559, entries=150, sequenceid=52, filesize=11.7 K 2024-12-03T15:22:52,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f40d261b8d74481aba2b73c65a34b332 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332 2024-12-03T15:22:52,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332, entries=150, sequenceid=52, filesize=11.7 K 2024-12-03T15:22:52,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1319ms, sequenceid=52, compaction requested=true 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:52,582 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:52,582 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:52,582 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:52,583 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135904 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:52,583 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:52,583 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:22:52,583 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:22:52,583 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:52,583 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,583 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=132.7 K 2024-12-03T15:22:52,583 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.2 K 2024-12-03T15:22:52,583 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,583 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d] 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cdfc52f310bc410aa7f59f806eec3f1f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733239370081 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38d631c324374dedaeeb124def735ba2, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733239370081 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting afa3aba9c2ac4fcfb89a0b37d1b16f39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239370101 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60364f25e1054e73986ee10f0ad6f5d9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239370101 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d82bcf54ad54dd99cd93dde6b45d559, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371252 2024-12-03T15:22:52,584 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 503b4cff08e243288dd7332d78bbbc2d, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371250 2024-12-03T15:22:52,589 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:52,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:52,590 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:52,590 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:52,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:52,592 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:52,592 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/85302b7c0f44456dab93a28f514fb27b is 50, key is test_row_0/B:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:52,594 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203ecf3771419044f198a2984eecaeba062_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:52,597 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203ecf3771419044f198a2984eecaeba062_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:52,597 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203ecf3771419044f198a2984eecaeba062_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:52,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742411_1587 (size=12104) 2024-12-03T15:22:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120303fb417030174eb0a648175542f45980_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239371359/Put/seqid=0 2024-12-03T15:22:52,618 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/85302b7c0f44456dab93a28f514fb27b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/85302b7c0f44456dab93a28f514fb27b 2024-12-03T15:22:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742412_1588 (size=4469) 2024-12-03T15:22:52,620 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#495 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:52,621 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/dfc386aa5ec548a89dc1f0c7534b5f44 is 175, key is test_row_0/A:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:52,624 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into 85302b7c0f44456dab93a28f514fb27b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:52,624 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:52,624 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=13, startTime=1733239372582; duration=0sec 2024-12-03T15:22:52,624 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:52,624 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:22:52,625 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:52,626 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:52,626 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction (all files) 2024-12-03T15:22:52,626 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:22:52,626 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.2 K 2024-12-03T15:22:52,626 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 972d3dbfbda14b7bbccedb391f0c527e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733239370081 2024-12-03T15:22:52,627 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting dc2f7f1b5bd94e20ab4eac8b5a8fe665, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733239370101 2024-12-03T15:22:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742413_1589 (size=31058) 2024-12-03T15:22:52,627 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting f40d261b8d74481aba2b73c65a34b332, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371252 2024-12-03T15:22:52,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742414_1590 (size=12154) 2024-12-03T15:22:52,634 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#498 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:52,634 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/351e223d6a3c430ab4465457b5f84630 is 50, key is test_row_0/C:col10/1733239371252/Put/seqid=0 2024-12-03T15:22:52,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,637 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/dfc386aa5ec548a89dc1f0c7534b5f44 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44 2024-12-03T15:22:52,639 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120303fb417030174eb0a648175542f45980_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120303fb417030174eb0a648175542f45980_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2ac3cafd7c914787a985b04e810a810c, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:52,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2ac3cafd7c914787a985b04e810a810c is 175, key is test_row_0/A:col10/1733239371359/Put/seqid=0 2024-12-03T15:22:52,643 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into dfc386aa5ec548a89dc1f0c7534b5f44(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:52,643 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:52,643 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=13, startTime=1733239372582; duration=0sec 2024-12-03T15:22:52,643 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:52,643 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:22:52,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742415_1591 (size=12104) 2024-12-03T15:22:52,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742416_1592 (size=30955) 2024-12-03T15:22:52,655 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2ac3cafd7c914787a985b04e810a810c 2024-12-03T15:22:52,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d341e3098b78459b991a075e8db662fe is 50, key is test_row_0/B:col10/1733239371359/Put/seqid=0 2024-12-03T15:22:52,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742417_1593 (size=12001) 2024-12-03T15:22:52,666 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d341e3098b78459b991a075e8db662fe 2024-12-03T15:22:52,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/b2ba7c3ae72249b7b89302e0b318594d is 50, key is test_row_0/C:col10/1733239371359/Put/seqid=0 2024-12-03T15:22:52,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742418_1594 (size=12001) 2024-12-03T15:22:52,685 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=78 (bloomFilter=true), 
to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/b2ba7c3ae72249b7b89302e0b318594d 2024-12-03T15:22:52,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2ac3cafd7c914787a985b04e810a810c as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c 2024-12-03T15:22:52,693 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c, entries=150, sequenceid=78, filesize=30.2 K 2024-12-03T15:22:52,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d341e3098b78459b991a075e8db662fe as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe 2024-12-03T15:22:52,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,696 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:22:52,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/b2ba7c3ae72249b7b89302e0b318594d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d 2024-12-03T15:22:52,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,701 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d, entries=150, sequenceid=78, filesize=11.7 K 2024-12-03T15:22:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,702 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,702 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for 365d5514a5bb1b4ce69a418d0f3e9934 in 112ms, sequenceid=78, compaction requested=false 2024-12-03T15:22:52,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:52,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:52,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-03T15:22:52,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-03T15:22:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5230 sec 2024-12-03T15:22:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,706 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 1.5270 sec 2024-12-03T15:22:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG message ("storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=46815), timestamps 2024-12-03T15:22:52,737 through 2024-12-03T15:22:52,791 ...]
2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[the identical DEBUG record "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" is emitted continuously by RpcServer.default.FPBQ.Fifo.handler=0, handler=1 and handler=2 (queue=0, port=46815) from 2024-12-03T15:22:52,825 through 2024-12-03T15:22:52,901]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
2024-12-03T15:22:53,081 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/351e223d6a3c430ab4465457b5f84630 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/351e223d6a3c430ab4465457b5f84630
2024-12-03T15:22:53,111 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 365d5514a5bb1b4ce69a418d0f3e9934 into 351e223d6a3c430ab4465457b5f84630(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-03T15:22:53,111 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:22:53,111 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=13, startTime=1733239372582; duration=0sec
2024-12-03T15:22:53,112 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:22:53,112 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-03T15:22:53,295 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-12-03T15:22:53,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,299 
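The client-side record above ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed") is the HBaseAdmin table future reporting that the master-side flush procedure finished. Below is a minimal, illustrative sketch of the kind of client call that produces such records, assuming a stock HBase 2.x client on the classpath; the table and namespace names come from the log, while the ZooKeeper quorum value is a placeholder and not taken from this test setup.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Standard client configuration; the quorum address is a placeholder.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush procedure to the master and, in recent
      // client versions, waits on it -- which is what emits the
      // "Operation: FLUSH ... procId: NNN completed" line seen in the log.
      admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
    }
  }
}
```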
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-03T15:22:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,303 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:53,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,303 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:53,303 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-03T15:22:53,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-12-03T15:22:53,357 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-03T15:22:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153
2024-12-03T15:22:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154
2024-12-03T15:22:53,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:22:53,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:53,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154
2024-12-03T15:22:53,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=154
2024-12-03T15:22:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153
2024-12-03T15:22:53,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 155 msec
2024-12-03T15:22:53,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 159 msec
2024-12-03T15:22:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:53,465 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,475 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,479 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,484 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,489 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,497 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,502 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,510 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,515 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,518 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,524 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,530 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,535 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,540 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,550 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,564 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:53,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c21f0b322c7543df9134e982a0d36452_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is 
test_row_0/A:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-03T15:22:53,607 INFO [Thread-2542 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-03T15:22:53,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:53,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-03T15:22:53,610 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:53,611 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:53,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:53,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742420_1596 (size=24358) 2024-12-03T15:22:53,632 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:53,636 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c21f0b322c7543df9134e982a0d36452_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c21f0b322c7543df9134e982a0d36452_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:53,637 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:53,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60 is 175, key is test_row_0/A:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:53,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742419_1595 (size=73995) 2024-12-03T15:22:53,641 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60
2024-12-03T15:22:53,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:53,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239433654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/2ca2bdf14c4a41b9ab92caf4551fa1aa is 50, key is test_row_0/B:col10/1733239373556/Put/seqid=0
2024-12-03T15:22:53,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:53,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239433656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239433657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,663 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239433658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:53,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239433660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742421_1597 (size=12001)
2024-12-03T15:22:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-03T15:22:53,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:53,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156
2024-12-03T15:22:53,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:53,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing
2024-12-03T15:22:53,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:53,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156
java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:53,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156
java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-03T15:22:53,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239433763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239433765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239433765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239433765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239433768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:53,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:53,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:53,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:53,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:53,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:53,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:53,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:53,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239433969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239433970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239433971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239433973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:53,977 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:53,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239433975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:54,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:54,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:54,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,108 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/2ca2bdf14c4a41b9ab92caf4551fa1aa 2024-12-03T15:22:54,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fa54485d66334615b093421ba9ce44b7 is 50, key is test_row_0/C:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:54,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742422_1598 (size=12001) 2024-12-03T15:22:54,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:54,226 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:54,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:54,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239434273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239434275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239434277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239434278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239434280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,378 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:54,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fa54485d66334615b093421ba9ce44b7 2024-12-03T15:22:54,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60 2024-12-03T15:22:54,532 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:54,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:54,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
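The repeated pid=156 failures above are the master's remote flush procedure being bounced while the region is already flushing ("NOT flushing ... as already flushing"); the master re-dispatches the procedure until the in-progress flush completes. From the client side the whole exchange starts as a plain administrative flush request, roughly as sketched below (assuming a reachable cluster; the table name is taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table's regions; the master drives this through
            // a flush procedure like pid=156 above and retries when a region server reports
            // that a flush is already running.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```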
2024-12-03T15:22:54,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60, entries=400, sequenceid=93, filesize=72.3 K 2024-12-03T15:22:54,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:54,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/2ca2bdf14c4a41b9ab92caf4551fa1aa as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa 2024-12-03T15:22:54,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:22:54,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fa54485d66334615b093421ba9ce44b7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7 2024-12-03T15:22:54,545 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7, entries=150, sequenceid=93, filesize=11.7 K 2024-12-03T15:22:54,546 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 365d5514a5bb1b4ce69a418d0f3e9934 in 982ms, sequenceid=93, compaction requested=true 2024-12-03T15:22:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:54,546 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 
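The "Selecting compaction from 3 store files ... 3 eligible, 16 blocking" entries above, and the ExploringCompactionPolicy lines that follow, reflect HBase's store-file selection for minor compaction. The core idea is a size-ratio test: a file stays in a candidate set only if it is not much larger than the other candidates combined, with a ratio commonly defaulting to 1.2. The sketch below illustrates that rule only; it is a simplification, not the actual ExploringCompactionPolicy code.

```java
import java.util.Arrays;

public class RatioSelectionSketch {
    /** True if every file passes the classic size-ratio test for a candidate selection. */
    static boolean allWithinRatio(long[] fileSizes, double ratio) {
        long total = Arrays.stream(fileSizes).sum();
        for (long size : fileSizes) {
            // A file is acceptable only if it is not much bigger than the rest combined.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Rough stand-ins for the three similarly sized B-family files in the log
        // (~11.8 K, 11.7 K, 11.7 K); similar sizes pass the ratio test together.
        long[] sizes = {12084, 11980, 11980};
        System.out.println(allWithinRatio(sizes, 1.2));
    }
}
```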
2024-12-03T15:22:54,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:54,546 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:54,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:54,547 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:54,547 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136008 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:54,547 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:22:54,547 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,547 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=132.8 K 2024-12-03T15:22:54,547 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,547 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60] 2024-12-03T15:22:54,549 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:54,549 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:22:54,549 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,549 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/85302b7c0f44456dab93a28f514fb27b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.3 K 2024-12-03T15:22:54,549 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfc386aa5ec548a89dc1f0c7534b5f44, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371252 2024-12-03T15:22:54,550 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 85302b7c0f44456dab93a28f514fb27b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371252 2024-12-03T15:22:54,550 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d341e3098b78459b991a075e8db662fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239371334 2024-12-03T15:22:54,550 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ca2bdf14c4a41b9ab92caf4551fa1aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373556 2024-12-03T15:22:54,552 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ac3cafd7c914787a985b04e810a810c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239371334 2024-12-03T15:22:54,554 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
c1e4b9a8bfbc4ffb9c303732e5ce3a60, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373525 2024-12-03T15:22:54,561 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:54,562 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#504 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:54,562 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/0e7503e847174867abc634f4c2142ca1 is 50, key is test_row_0/B:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:54,566 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203375f7eba4eaf43aaadd3178fe93f4b02_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:54,569 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203375f7eba4eaf43aaadd3178fe93f4b02_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:54,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742423_1599 (size=12207) 2024-12-03T15:22:54,570 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203375f7eba4eaf43aaadd3178fe93f4b02_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:54,580 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/0e7503e847174867abc634f4c2142ca1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/0e7503e847174867abc634f4c2142ca1 2024-12-03T15:22:54,586 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into 0e7503e847174867abc634f4c2142ca1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
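The DefaultMobStoreCompactor lines above, including "Aborting writer ... because there are no MOB cells", appear because column family A is MOB-enabled in this test, so its flushes and compactions go through the MOB code path even when no cell exceeds the MOB threshold. Declaring such a family looks roughly like the sketch below; the 100 KB threshold is an assumption for illustration, not a value taken from this log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) throws Exception {
        ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)           // route large values through the MOB store
            .setMobThreshold(100 * 1024L)  // cells above ~100 KB become MOB cells (assumed)
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build());
        }
    }
}
```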
2024-12-03T15:22:54,586 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:54,586 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=13, startTime=1733239374546; duration=0sec 2024-12-03T15:22:54,586 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:54,586 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:22:54,586 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:54,588 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:54,588 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction (all files) 2024-12-03T15:22:54,588 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,588 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/351e223d6a3c430ab4465457b5f84630, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.3 K 2024-12-03T15:22:54,588 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 351e223d6a3c430ab4465457b5f84630, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733239371252 2024-12-03T15:22:54,589 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting b2ba7c3ae72249b7b89302e0b318594d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733239371334 2024-12-03T15:22:54,590 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting fa54485d66334615b093421ba9ce44b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373556 2024-12-03T15:22:54,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is 
added to blk_1073742424_1600 (size=4469) 2024-12-03T15:22:54,592 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#505 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:54,593 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/0e0ce537095e44a59ab8184a1fef7c44 is 175, key is test_row_0/A:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:54,604 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:54,604 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/badf67c079234d21b1ea7241d92f654e is 50, key is test_row_0/C:col10/1733239373556/Put/seqid=0 2024-12-03T15:22:54,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742425_1601 (size=31161) 2024-12-03T15:22:54,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742426_1602 (size=12207) 2024-12-03T15:22:54,676 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/badf67c079234d21b1ea7241d92f654e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/badf67c079234d21b1ea7241d92f654e 2024-12-03T15:22:54,680 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 365d5514a5bb1b4ce69a418d0f3e9934 into badf67c079234d21b1ea7241d92f654e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
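With the C-family compaction committed above, the three small store files have been replaced by a single ~11.9 K file, and the rows stay readable throughout, which is what the acid-guarantees test keeps checking while it drives writes, flushes and compactions. A trivial read-back sketch against the same table; the row key comes from the HFile keys in the log, everything else is standard client API:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // "test_row_0" appears in the log as the first key of the flushed/compacted HFiles.
            Result r = table.get(new Get(Bytes.toBytes("test_row_0"))
                .addFamily(Bytes.toBytes("C")));
            System.out.println("cells for C: " + r.listCells());
        }
    }
}
```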
2024-12-03T15:22:54,680 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:54,680 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=13, startTime=1733239374546; duration=0sec 2024-12-03T15:22:54,680 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:54,680 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:22:54,683 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-03T15:22:54,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:54,687 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:54,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:54,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120388682fdd3c2f47eb8257c1f77f42af2e_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239373655/Put/seqid=0 2024-12-03T15:22:54,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:54,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742427_1603 (size=12154) 2024-12-03T15:22:54,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:54,721 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120388682fdd3c2f47eb8257c1f77f42af2e_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120388682fdd3c2f47eb8257c1f77f42af2e_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:54,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/d9fb26d871d940268a5af641b7351e77, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/d9fb26d871d940268a5af641b7351e77 is 175, key is test_row_0/A:col10/1733239373655/Put/seqid=0 2024-12-03T15:22:54,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742428_1604 (size=30955) 2024-12-03T15:22:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:54,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:54,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239434786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239434786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239434786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239434787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239434787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239434896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239434897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239434897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:54,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:54,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239434902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,035 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/0e0ce537095e44a59ab8184a1fef7c44 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44 2024-12-03T15:22:55,039 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into 0e0ce537095e44a59ab8184a1fef7c44(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:55,039 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:55,039 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=13, startTime=1733239374546; duration=0sec 2024-12-03T15:22:55,039 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:55,039 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:22:55,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239435102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239435102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239435103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239435114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,154 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=122, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/d9fb26d871d940268a5af641b7351e77 2024-12-03T15:22:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/043f3b299472450cbe2182485e2c4b42 is 50, key is test_row_0/B:col10/1733239373655/Put/seqid=0 2024-12-03T15:22:55,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742429_1605 (size=12001) 2024-12-03T15:22:55,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239435405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239435405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239435407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239435416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,576 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/043f3b299472450cbe2182485e2c4b42 2024-12-03T15:22:55,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fd278a6c3d28461099bb0679025532b3 is 50, key is test_row_0/C:col10/1733239373655/Put/seqid=0 2024-12-03T15:22:55,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742430_1606 (size=12001) 2024-12-03T15:22:55,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:55,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239435793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239435914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239435914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239435915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:55,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:55,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239435923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:56,022 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fd278a6c3d28461099bb0679025532b3 2024-12-03T15:22:56,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/d9fb26d871d940268a5af641b7351e77 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77 2024-12-03T15:22:56,033 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77, entries=150, sequenceid=122, filesize=30.2 K 2024-12-03T15:22:56,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/043f3b299472450cbe2182485e2c4b42 as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42 2024-12-03T15:22:56,039 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42, entries=150, sequenceid=122, filesize=11.7 K 2024-12-03T15:22:56,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/fd278a6c3d28461099bb0679025532b3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3 2024-12-03T15:22:56,055 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3, entries=150, sequenceid=122, filesize=11.7 K 2024-12-03T15:22:56,056 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1369ms, sequenceid=122, compaction requested=false 2024-12-03T15:22:56,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:56,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
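The repeated RegionTooBusyException warnings interleaved with this flush all originate from HRegion.checkResources() (HRegion.java:5067 in this build), which rejects incoming mutations while the region's memstore is over its blocking limit (512.0 K in this test) and lets the flush above drain it. Below is a minimal, self-contained sketch of that style of back-pressure check; the class, field, and helper names (MemstoreBackpressure, blockingLimitBytes, flushRequester) are illustrative assumptions, not the actual HBase 2.7.0-SNAPSHOT internals.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;

// Hedged sketch only: mirrors the behavior visible in the log above
// (reject writes over a memstore blocking limit, ask for a flush),
// not the real HRegion.checkResources() implementation.
public class MemstoreBackpressure {
  private final long blockingLimitBytes;  // e.g. 512 KiB in this test run
  private final Runnable flushRequester;  // callback that schedules a region flush

  public MemstoreBackpressure(long blockingLimitBytes, Runnable flushRequester) {
    this.blockingLimitBytes = blockingLimitBytes;
    this.flushRequester = flushRequester;
  }

  /** Rejects a write when the region's memstore exceeds its blocking limit. */
  public void checkResources(long memstoreSizeBytes, String regionName, String serverName)
      throws RegionTooBusyException {
    if (memstoreSizeBytes <= blockingLimitBytes) {
      return; // under the limit, let the mutation proceed
    }
    flushRequester.run(); // schedule a flush so the region can catch up
    throw new RegionTooBusyException("Over memstore limit="
        + (blockingLimitBytes / 1024.0) + " K, regionName=" + regionName
        + ", server=" + serverName);
  }
}
```

The client is expected to retry after such a rejection, which is why the same callers reappear in the log with new callIds and later deadlines once the flush completes.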
2024-12-03T15:22:56,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-03T15:22:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-03T15:22:56,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-03T15:22:56,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4460 sec 2024-12-03T15:22:56,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 2.4510 sec 2024-12-03T15:22:56,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:56,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:22:56,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:56,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:56,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:56,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:56,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:56,921 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:56,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e94703ff41ec4b4d90ff80053c95ad93_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:56,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742431_1607 (size=12254) 2024-12-03T15:22:56,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:56,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239436968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:56,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:56,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239436971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239436970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:56,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:56,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239436971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239437072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239437075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239437075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239437075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239437275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239437277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239437277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239437278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,335 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:57,338 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e94703ff41ec4b4d90ff80053c95ad93_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203e94703ff41ec4b4d90ff80053c95ad93_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:57,339 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/f25a33c1d9ca4a53868b272e163676aa, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:57,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/f25a33c1d9ca4a53868b272e163676aa is 175, key is test_row_0/A:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:57,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742432_1608 (size=31055) 2024-12-03T15:22:57,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239437578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239437579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239437581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239437582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-03T15:22:57,717 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-03T15:22:57,718 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:22:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-03T15:22:57,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:22:57,720 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:22:57,720 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:22:57,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:22:57,744 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/f25a33c1d9ca4a53868b272e163676aa 2024-12-03T15:22:57,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/1f3127b6724e4d73ba2aa50398093fc8 is 50, key is test_row_0/B:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:57,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742433_1609 (size=12101) 
2024-12-03T15:22:57,805 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:57,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239437804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,805 DEBUG [Thread-2540 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:22:57,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:22:57,872 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:57,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:57,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:57,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:57,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:57,872 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:57,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:22:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:22:58,024 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,024 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:58,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:58,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,025 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:58,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239438081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:58,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239438082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239438084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239438085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/1f3127b6724e4d73ba2aa50398093fc8 2024-12-03T15:22:58,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f6a5843c8b54467a94dbedb32a924ed1 is 50, key is test_row_0/C:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:58,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742434_1610 (size=12101) 2024-12-03T15:22:58,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:58,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:58,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:22:58,333 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:58,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:58,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:22:58,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f6a5843c8b54467a94dbedb32a924ed1 2024-12-03T15:22:58,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/f25a33c1d9ca4a53868b272e163676aa as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa 2024-12-03T15:22:58,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa, entries=150, sequenceid=134, filesize=30.3 K 2024-12-03T15:22:58,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/1f3127b6724e4d73ba2aa50398093fc8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8 2024-12-03T15:22:58,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8, entries=150, 
sequenceid=134, filesize=11.8 K 2024-12-03T15:22:58,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/f6a5843c8b54467a94dbedb32a924ed1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1 2024-12-03T15:22:58,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1, entries=150, sequenceid=134, filesize=11.8 K 2024-12-03T15:22:58,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1678ms, sequenceid=134, compaction requested=true 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:58,598 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:58,598 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:22:58,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:58,599 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:58,599 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:58,599 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] 
regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:22:58,599 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:22:58,599 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,599 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,599 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/0e7503e847174867abc634f4c2142ca1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.5 K 2024-12-03T15:22:58,599 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=91.0 K 2024-12-03T15:22:58,599 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,600 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa] 2024-12-03T15:22:58,600 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e0ce537095e44a59ab8184a1fef7c44, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373556 2024-12-03T15:22:58,600 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e7503e847174867abc634f4c2142ca1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373556 2024-12-03T15:22:58,600 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9fb26d871d940268a5af641b7351e77, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1733239373654 2024-12-03T15:22:58,601 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting f25a33c1d9ca4a53868b272e163676aa, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:22:58,601 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 043f3b299472450cbe2182485e2c4b42, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1733239373654 2024-12-03T15:22:58,601 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f3127b6724e4d73ba2aa50398093fc8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:22:58,608 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:58,609 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120390f405d1607f45598e1f523442949f5a_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:58,610 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#514 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:58,611 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/2191b64233fc43c19a4c2a33dab3809d is 50, key is test_row_0/B:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:58,611 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120390f405d1607f45598e1f523442949f5a_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:58,611 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120390f405d1607f45598e1f523442949f5a_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:58,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742435_1611 (size=12409) 2024-12-03T15:22:58,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742436_1612 (size=4469) 2024-12-03T15:22:58,623 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#513 average throughput is 1.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:58,624 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e23957a295704dc6990c1d5c7c58e172 is 175, key is test_row_0/A:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:58,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742437_1613 (size=31363) 2024-12-03T15:22:58,632 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e23957a295704dc6990c1d5c7c58e172 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172 2024-12-03T15:22:58,638 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:58,642 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into e23957a295704dc6990c1d5c7c58e172(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:58,642 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:58,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-03T15:22:58,642 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=13, startTime=1733239378598; duration=0sec 2024-12-03T15:22:58,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,642 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:22:58,642 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:22:58,642 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:22:58,642 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:22:58,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:22:58,643 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:22:58,644 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction 
(all files) 2024-12-03T15:22:58,644 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:22:58,644 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/badf67c079234d21b1ea7241d92f654e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=35.5 K 2024-12-03T15:22:58,644 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting badf67c079234d21b1ea7241d92f654e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733239373556 2024-12-03T15:22:58,644 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd278a6c3d28461099bb0679025532b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1733239373654 2024-12-03T15:22:58,645 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6a5843c8b54467a94dbedb32a924ed1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:22:58,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f6126583e3314b499b914e7f067288a8_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239376970/Put/seqid=0 2024-12-03T15:22:58,652 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#516 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:22:58,652 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/878fe19176ce41b1b3cfb7ff291ab1c1 is 50, key is test_row_0/C:col10/1733239374786/Put/seqid=0 2024-12-03T15:22:58,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742439_1615 (size=12409) 2024-12-03T15:22:58,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742438_1614 (size=12304) 2024-12-03T15:22:58,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:22:59,025 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/2191b64233fc43c19a4c2a33dab3809d as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2191b64233fc43c19a4c2a33dab3809d 2024-12-03T15:22:59,029 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into 2191b64233fc43c19a4c2a33dab3809d(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-03T15:22:59,030 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:59,030 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=13, startTime=1733239378598; duration=0sec 2024-12-03T15:22:59,030 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:59,030 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:22:59,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,063 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f6126583e3314b499b914e7f067288a8_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f6126583e3314b499b914e7f067288a8_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:59,064 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/878fe19176ce41b1b3cfb7ff291ab1c1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/878fe19176ce41b1b3cfb7ff291ab1c1 2024-12-03T15:22:59,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/bf0713e8d5d7449ebe5fe90432e06d9e, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:22:59,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/bf0713e8d5d7449ebe5fe90432e06d9e is 175, key is test_row_0/A:col10/1733239376970/Put/seqid=0 2024-12-03T15:22:59,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742440_1616 (size=31105) 2024-12-03T15:22:59,068 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 
365d5514a5bb1b4ce69a418d0f3e9934 into 878fe19176ce41b1b3cfb7ff291ab1c1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:22:59,068 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:22:59,068 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=13, startTime=1733239378598; duration=0sec 2024-12-03T15:22:59,068 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:22:59,068 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:22:59,068 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/bf0713e8d5d7449ebe5fe90432e06d9e 2024-12-03T15:22:59,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/9586fbc2c92747b683dc6f43a1b7e9eb is 50, key is test_row_0/B:col10/1733239376970/Put/seqid=0 2024-12-03T15:22:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742441_1617 (size=12151) 2024-12-03T15:22:59,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:22:59,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:22:59,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239439092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239439093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239439094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239439094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239439195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239439196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239439197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239439197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239439398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239439398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239439400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:22:59,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239439400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:22:59,479 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/9586fbc2c92747b683dc6f43a1b7e9eb 2024-12-03T15:22:59,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ef7bf78c174c425188b2d703d597c233 is 50, key is test_row_0/C:col10/1733239376970/Put/seqid=0 2024-12-03T15:22:59,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742442_1618 (size=12151) 2024-12-03T15:22:59,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239439700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:59,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239439701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:59,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239439702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:59,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:22:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239439702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:22:59,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157
2024-12-03T15:22:59,890 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ef7bf78c174c425188b2d703d597c233
2024-12-03T15:22:59,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/bf0713e8d5d7449ebe5fe90432e06d9e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e
2024-12-03T15:22:59,897 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e, entries=150, sequenceid=161, filesize=30.4 K
2024-12-03T15:22:59,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/9586fbc2c92747b683dc6f43a1b7e9eb as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb
2024-12-03T15:22:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:22:59,901 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb, entries=150, sequenceid=161, filesize=11.9 K
2024-12-03T15:22:59,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ef7bf78c174c425188b2d703d597c233 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233
2024-12-03T15:22:59,904 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233, entries=150, sequenceid=161, filesize=11.9 K
2024-12-03T15:22:59,905 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1263ms, sequenceid=161, compaction requested=false
2024-12-03T15:22:59,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:22:59,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:22:59,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158
2024-12-03T15:22:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=158
2024-12-03T15:22:59,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157
2024-12-03T15:22:59,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1860 sec
2024-12-03T15:22:59,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 2.1900 sec
2024-12-03T15:22:59,957 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,960 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,969 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,972 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,981 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,984 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,987 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,990 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,992 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,996 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:22:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,001 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,004 DEBUG 
2024-12-03T15:23:00,004 to 2024-12-03T15:23:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (this DEBUG record repeats, essentially verbatim, several hundred times across handlers 0, 1 and 2 within this interval; only the timestamp and handler id vary between entries)
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,093 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,097 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,100 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,103 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,107 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,116 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,127 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,130 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,146 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,148 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,152 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,155 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,160 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,164 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,169 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,181 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,184 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,197 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,200 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,205 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,207 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934
2024-12-03T15:23:00,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A
2024-12-03T15:23:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B
2024-12-03T15:23:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C
2024-12-03T15:23:00,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,209 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,212 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038ed018eba3e94ceb94c7da21941ca698_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742443_1619 (size=12304) 
2024-12-03T15:23:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-03T15:23:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239440232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239440233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239440234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239440234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239440335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239440336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239440336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239440336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239440538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239440538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239440538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239440539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,622 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:00,626 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412038ed018eba3e94ceb94c7da21941ca698_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038ed018eba3e94ceb94c7da21941ca698_365d5514a5bb1b4ce69a418d0f3e9934
2024-12-03T15:23:00,628 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e76b7f089a674f6ea7a57d4e165ddb02, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934]
2024-12-03T15:23:00,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e76b7f089a674f6ea7a57d4e165ddb02 is 175, key is test_row_0/A:col10/1733239380207/Put/seqid=0
2024-12-03T15:23:00,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742444_1620 (size=31101)
2024-12-03T15:23:00,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239440840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:00,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239440840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:00,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:00,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239440841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:00,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:00,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239440842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,033 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e76b7f089a674f6ea7a57d4e165ddb02 2024-12-03T15:23:01,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/459fc0d879e542929588a690349eb780 is 50, key is test_row_0/B:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742445_1621 (size=9757) 2024-12-03T15:23:01,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:01,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239441343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:01,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239441343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:01,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239441344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:01,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239441345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/459fc0d879e542929588a690349eb780 2024-12-03T15:23:01,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/afd17a064567420b856e95e6d4f08ffd is 50, key is test_row_0/C:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:01,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742446_1622 (size=9757) 2024-12-03T15:23:01,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51648 deadline: 1733239441809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,811 DEBUG [Thread-2540 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8156 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., hostname=2b5ef621a0dd,46815,1733239226292, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-03T15:23:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-03T15:23:01,824 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-03T15:23:01,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:23:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-03T15:23:01,826 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:23:01,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:01,827 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:23:01,827 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:23:01,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/afd17a064567420b856e95e6d4f08ffd 2024-12-03T15:23:01,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/e76b7f089a674f6ea7a57d4e165ddb02 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02 2024-12-03T15:23:01,859 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02, entries=150, sequenceid=175, filesize=30.4 K 2024-12-03T15:23:01,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/459fc0d879e542929588a690349eb780 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780 2024-12-03T15:23:01,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780, entries=100, sequenceid=175, filesize=9.5 K 2024-12-03T15:23:01,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/afd17a064567420b856e95e6d4f08ffd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd 2024-12-03T15:23:01,865 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd, entries=100, sequenceid=175, filesize=9.5 K 2024-12-03T15:23:01,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1658ms, sequenceid=175, compaction requested=true 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:23:01,866 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:23:01,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:01,866 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93569 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:23:01,867 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
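The repeated RegionTooBusyException entries above come from HRegion.checkResources(): once the region's memstore passes its blocking limit (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, reported here as 512.0 K, which suggests the test deliberately configures a very small flush size), new mutations are rejected until the in-progress flush frees memory, and the client's RpcRetryingCallerImpl backs off and retries (tries=7, retries=16 in the trace above). A minimal client-side sketch of the write path that produces those retries, assuming the standard HBase 2.x client API; the retry and pause values are illustrative, not taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstorePressurePut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 16); // matches retries=16 reported by RpcRetryingCallerImpl
        conf.setLong("hbase.client.pause", 100);        // base backoff in ms (illustrative value)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // While the memstore is over the blocking limit, each attempt fails with
          // RegionTooBusyException; the caller sleeps and retries until the region
          // flushes or the retry budget is exhausted.
          table.put(put);
        }
      }
    }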
2024-12-03T15:23:01,867 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:01,867 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=91.4 K 2024-12-03T15:23:01,867 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2191b64233fc43c19a4c2a33dab3809d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=33.5 K 2024-12-03T15:23:01,867 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02] 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 2191b64233fc43c19a4c2a33dab3809d, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e23957a295704dc6990c1d5c7c58e172, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 9586fbc2c92747b683dc6f43a1b7e9eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239376965 2024-12-03T15:23:01,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf0713e8d5d7449ebe5fe90432e06d9e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239376965 2024-12-03T15:23:01,868 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 459fc0d879e542929588a690349eb780, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239379093 2024-12-03T15:23:01,868 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting e76b7f089a674f6ea7a57d4e165ddb02, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239379093 2024-12-03T15:23:01,872 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:01,876 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412033f59c039c8474b51b6694bef104e08e0_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:01,876 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#523 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:01,877 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412033f59c039c8474b51b6694bef104e08e0_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:01,877 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412033f59c039c8474b51b6694bef104e08e0_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:01,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742447_1623 (size=4469) 2024-12-03T15:23:01,881 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c7b8b29374744e91af3858a0a916cd33 is 50, key is test_row_0/B:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:01,882 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#522 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:01,882 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/85e7fd117998400f9b11f0ecdc482be3 is 175, key is test_row_0/A:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:01,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742449_1625 (size=31622) 2024-12-03T15:23:01,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742448_1624 (size=12561) 2024-12-03T15:23:01,892 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/85e7fd117998400f9b11f0ecdc482be3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3 2024-12-03T15:23:01,895 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into 85e7fd117998400f9b11f0ecdc482be3(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
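The two "Exploring compaction algorithm has selected 3 files" lines pair up with the A and B stores: for B the three flushed files of 12.1 K, 11.9 K and 9.5 K add up to roughly 33.5 K (34317 bytes), and for A the files of 30.6 K, 30.4 K and 30.4 K add up to roughly 91.4 K (93569 bytes). The exploring policy accepts a candidate set when no single file is larger than hbase.hstore.compaction.ratio (1.2 by default, possibly overridden by the test) times the combined size of the others, so in both cases all three files are kept and a minor compaction over the whole set is queued. A small sketch of that ratio test, using the sizes reported above; the class and helper names are purely illustrative:

    // Approximation of the "files in ratio" check applied to a candidate set.
    public class CompactionRatioCheck {
      static boolean filesInRatio(double[] sizesKb, double ratio) {
        double total = 0;
        for (double s : sizesKb) total += s;
        for (double s : sizesKb) {
          if (s > (total - s) * ratio) return false; // one file dwarfs the rest -> reject the set
        }
        return true;
      }

      public static void main(String[] args) {
        double[] bStore = {12.1, 11.9, 9.5};  // B-store files selected above (~33.5 K total)
        double[] aStore = {30.6, 30.4, 30.4}; // A-store files selected above (~91.4 K total)
        System.out.println(filesInRatio(bStore, 1.2)); // true: 12.1 <= 1.2 * (11.9 + 9.5)
        System.out.println(filesInRatio(aStore, 1.2)); // true: 30.6 <= 1.2 * (30.4 + 30.4)
      }
    }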
2024-12-03T15:23:01,895 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:01,895 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=13, startTime=1733239381866; duration=0sec 2024-12-03T15:23:01,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:01,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:23:01,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:01,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:01,896 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction (all files) 2024-12-03T15:23:01,897 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:01,897 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/878fe19176ce41b1b3cfb7ff291ab1c1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=33.5 K 2024-12-03T15:23:01,897 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 878fe19176ce41b1b3cfb7ff291ab1c1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1733239374782 2024-12-03T15:23:01,897 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef7bf78c174c425188b2d703d597c233, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733239376965 2024-12-03T15:23:01,897 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting afd17a064567420b856e95e6d4f08ffd, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239379093 2024-12-03T15:23:01,904 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#524 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:01,905 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/98b88daba9a14e9a893cdbeeab202b55 is 50, key is test_row_0/C:col10/1733239380207/Put/seqid=0 2024-12-03T15:23:01,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742450_1626 (size=12561) 2024-12-03T15:23:01,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:01,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:01,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-03T15:23:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:01,979 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-03T15:23:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:01,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:01,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:01,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:01,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412037ea0ae084b6744b78355d6879faec200_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is 
test_row_0/A:col10/1733239380233/Put/seqid=0 2024-12-03T15:23:01,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742451_1627 (size=12304) 2024-12-03T15:23:02,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:02,292 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c7b8b29374744e91af3858a0a916cd33 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c7b8b29374744e91af3858a0a916cd33 2024-12-03T15:23:02,296 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into c7b8b29374744e91af3858a0a916cd33(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:23:02,296 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:02,296 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=13, startTime=1733239381866; duration=0sec 2024-12-03T15:23:02,296 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:02,296 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:23:02,313 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/98b88daba9a14e9a893cdbeeab202b55 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/98b88daba9a14e9a893cdbeeab202b55 2024-12-03T15:23:02,316 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 365d5514a5bb1b4ce69a418d0f3e9934 into 98b88daba9a14e9a893cdbeeab202b55(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
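Each flush above follows the same two-step commit: the store writes its snapshot to a per-family temporary file (.tmp/A, .tmp/B, .tmp/C), then commits it by renaming it into the store directory ("Committing ... as ...", "Added ..., entries=..., sequenceid=175"), and only after all families are committed does the region log "Finished flush of dataSize ..." and queue the follow-up compactions. The FLUSH procedures (procId 157 and 159, plus the per-region pid=160) are driven from the test client: Admin.flush on the table becomes a master-side FlushTableProcedure with one FlushRegionProcedure per region, and the "Checking to see if procedure is done" lines are the client polling for completion. A minimal sketch of that client call, assuming the standard HBase 2.x Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the call returns once the
          // flush procedure completes (the "Operation: FLUSH ... completed" line above).
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }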
2024-12-03T15:23:02,316 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:02,316 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=13, startTime=1733239381866; duration=0sec 2024-12-03T15:23:02,316 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:02,316 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:23:02,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:02,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:02,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239442353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239442359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239442360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239442360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:02,392 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412037ea0ae084b6744b78355d6879faec200_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037ea0ae084b6744b78355d6879faec200_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:02,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/901a6f08c01c42f3b790356cf6ef090e, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:02,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/901a6f08c01c42f3b790356cf6ef090e is 175, key is test_row_0/A:col10/1733239380233/Put/seqid=0 2024-12-03T15:23:02,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742452_1628 (size=31105) 2024-12-03T15:23:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:02,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239442460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239442462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239442463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239442466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239442661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239442664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239442664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239442668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,807 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/901a6f08c01c42f3b790356cf6ef090e 2024-12-03T15:23:02,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6e78c376b8954caaaca68e69bf47c73a is 50, key is test_row_0/B:col10/1733239380233/Put/seqid=0 2024-12-03T15:23:02,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742453_1629 (size=12151) 2024-12-03T15:23:02,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:02,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239442966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239442967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239442967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:02,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239442969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:03,218 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6e78c376b8954caaaca68e69bf47c73a 2024-12-03T15:23:03,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/437cbf1c91594d8e9aad2e6a23997eba is 50, key is test_row_0/C:col10/1733239380233/Put/seqid=0 2024-12-03T15:23:03,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742454_1630 (size=12151) 2024-12-03T15:23:03,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:03,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239443468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:03,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:03,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239443471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:03,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:03,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239443471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:03,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:03,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239443471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:03,629 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/437cbf1c91594d8e9aad2e6a23997eba 2024-12-03T15:23:03,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/901a6f08c01c42f3b790356cf6ef090e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e 2024-12-03T15:23:03,635 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e, entries=150, sequenceid=201, filesize=30.4 K 2024-12-03T15:23:03,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/6e78c376b8954caaaca68e69bf47c73a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a 2024-12-03T15:23:03,639 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a, entries=150, sequenceid=201, filesize=11.9 K 2024-12-03T15:23:03,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/437cbf1c91594d8e9aad2e6a23997eba as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba 2024-12-03T15:23:03,642 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba, entries=150, sequenceid=201, filesize=11.9 K 2024-12-03T15:23:03,643 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1664ms, sequenceid=201, compaction requested=false 2024-12-03T15:23:03,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:03,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:03,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-03T15:23:03,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-03T15:23:03,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-03T15:23:03,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8170 sec 2024-12-03T15:23:03,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.8200 sec 2024-12-03T15:23:03,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-03T15:23:03,930 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-03T15:23:03,931 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:23:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-03T15:23:03,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-03T15:23:03,932 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:23:03,933 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:23:03,933 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:23:04,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-03T15:23:04,086 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-03T15:23:04,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:04,087 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:04,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:04,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412031824e1a7938d4ed4af1b0be93a5e1648_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239382355/Put/seqid=0 2024-12-03T15:23:04,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32963 is added to blk_1073742455_1631 (size=12304) 2024-12-03T15:23:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:04,099 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412031824e1a7938d4ed4af1b0be93a5e1648_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412031824e1a7938d4ed4af1b0be93a5e1648_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:04,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2277b0aaa8fc4f6e815d5451e6d48a0b, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:04,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2277b0aaa8fc4f6e815d5451e6d48a0b is 175, key is test_row_0/A:col10/1733239382355/Put/seqid=0 2024-12-03T15:23:04,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742456_1632 (size=31105) 2024-12-03T15:23:04,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-03T15:23:04,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:04,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:04,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239444493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239444494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239444494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239444496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,504 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2277b0aaa8fc4f6e815d5451e6d48a0b 2024-12-03T15:23:04,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d95d4a415d814191910317c199d47a69 is 50, key is test_row_0/B:col10/1733239382355/Put/seqid=0 2024-12-03T15:23:04,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742457_1633 (size=12151) 2024-12-03T15:23:04,513 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d95d4a415d814191910317c199d47a69 2024-12-03T15:23:04,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dec88a85d6a8476da21856d180ab476b is 50, key is test_row_0/C:col10/1733239382355/Put/seqid=0 2024-12-03T15:23:04,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742458_1634 (size=12151) 2024-12-03T15:23:04,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-03T15:23:04,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239444597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239444597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239444597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239444599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239444800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239444800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239444801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:04,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239444802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:04,928 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dec88a85d6a8476da21856d180ab476b 2024-12-03T15:23:04,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/2277b0aaa8fc4f6e815d5451e6d48a0b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b 2024-12-03T15:23:04,934 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b, entries=150, sequenceid=216, filesize=30.4 K 2024-12-03T15:23:04,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/d95d4a415d814191910317c199d47a69 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69 2024-12-03T15:23:04,937 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69, entries=150, sequenceid=216, filesize=11.9 K 2024-12-03T15:23:04,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/dec88a85d6a8476da21856d180ab476b as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b 2024-12-03T15:23:04,940 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b, entries=150, sequenceid=216, filesize=11.9 K 2024-12-03T15:23:04,941 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 365d5514a5bb1b4ce69a418d0f3e9934 in 855ms, sequenceid=216, compaction requested=true 2024-12-03T15:23:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:04,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-03T15:23:04,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-03T15:23:04,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-03T15:23:04,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0090 sec 2024-12-03T15:23:04,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.0120 sec 2024-12-03T15:23:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-03T15:23:05,035 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-03T15:23:05,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:23:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-03T15:23:05,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:05,038 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:23:05,038 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:23:05,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-03T15:23:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:05,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-03T15:23:05,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:05,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:05,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030e7806a8112d49929d6c3859d750337c_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:05,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239445110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,113 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239445111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239445111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742459_1635 (size=12304) 2024-12-03T15:23:05,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239445112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:05,191 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:05,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239445213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239445214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239445214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239445215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:05,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:05,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239445416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239445417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239445417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239445418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,495 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:05,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,515 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:05,518 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030e7806a8112d49929d6c3859d750337c_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030e7806a8112d49929d6c3859d750337c_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:05,518 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/693d43288c074c468571be115c7c7840, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:05,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/693d43288c074c468571be115c7c7840 is 175, key is test_row_0/A:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:05,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742460_1636 (size=31105) 2024-12-03T15:23:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:05,648 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
as already flushing 2024-12-03T15:23:05,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239445720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239445721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239445722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:05,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239445722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,800 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:05,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,923 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=241, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/693d43288c074c468571be115c7c7840 2024-12-03T15:23:05,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/684dcd1fb33d46289a7157aa29b96260 is 50, key is test_row_0/B:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:05,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742461_1637 (size=12151) 2024-12-03T15:23:05,953 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:05,953 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:05,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:05,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:05,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:05,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:05,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:06,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,106 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:06,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:06,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239446222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:06,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239446227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:06,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239446228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:06,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239446228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:06,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/684dcd1fb33d46289a7157aa29b96260 2024-12-03T15:23:06,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/3107f31b85dc454a9e5310aa8d637059 is 50, key is test_row_0/C:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:06,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742462_1638 (size=12151) 2024-12-03T15:23:06,411 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:06,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,563 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:06,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,717 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:23:06,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:06,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:06,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
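The cycle above repeats several times: the master re-dispatches the flush procedure (pid=164), the region server declines because a flush is already running ("NOT flushing ... as already flushing"), the callable reports "Unable to complete flush", and the master records the remote failure and dispatches again once the in-flight flush has finished. A simplified sketch of that guard-and-retry pattern follows; it is illustrative only and is not the actual HRegion/FlushRegionCallable code.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch of the behaviour in the log: a remote flush request is rejected
// while another flush is in progress, and the caller (the master) retries later.
// This is not HBase's implementation.
class FlushGuardSketch {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    void handleRemoteFlushRequest() throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            // Corresponds to "NOT flushing ... as already flushing" followed by
            // "Unable to complete flush": the procedure fails and is re-dispatched.
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            doFlush(); // placeholder for the real memstore flush
        } finally {
            flushing.set(false);
        }
    }

    private void doFlush() {
        // snapshot the memstore and write store files (omitted)
    }
}
```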
2024-12-03T15:23:06,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/3107f31b85dc454a9e5310aa8d637059 2024-12-03T15:23:06,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/693d43288c074c468571be115c7c7840 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840 2024-12-03T15:23:06,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840, entries=150, sequenceid=241, filesize=30.4 K 2024-12-03T15:23:06,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/684dcd1fb33d46289a7157aa29b96260 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260 2024-12-03T15:23:06,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260, entries=150, sequenceid=241, filesize=11.9 K 2024-12-03T15:23:06,802 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/3107f31b85dc454a9e5310aa8d637059 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059 2024-12-03T15:23:06,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059, entries=150, sequenceid=241, filesize=11.9 K 2024-12-03T15:23:06,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1725ms, sequenceid=241, compaction requested=true 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:06,829 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:23:06,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:06,829 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:23:06,831 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:23:06,831 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124937 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:23:06,831 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:23:06,831 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:23:06,831 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,831 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:23:06,831 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=122.0 K 2024-12-03T15:23:06,832 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c7b8b29374744e91af3858a0a916cd33, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=47.9 K 2024-12-03T15:23:06,832 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,832 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840] 2024-12-03T15:23:06,832 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c7b8b29374744e91af3858a0a916cd33, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239376965 2024-12-03T15:23:06,832 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85e7fd117998400f9b11f0ecdc482be3, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239376965 2024-12-03T15:23:06,832 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e78c376b8954caaaca68e69bf47c73a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733239380231 2024-12-03T15:23:06,832 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 901a6f08c01c42f3b790356cf6ef090e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733239380231 2024-12-03T15:23:06,833 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d95d4a415d814191910317c199d47a69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733239382352 2024-12-03T15:23:06,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2277b0aaa8fc4f6e815d5451e6d48a0b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733239382352 2024-12-03T15:23:06,833 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 684dcd1fb33d46289a7157aa29b96260, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:06,833 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 693d43288c074c468571be115c7c7840, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:06,842 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:06,855 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#535 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:06,855 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c98f13d1881946efbe6cdcd79aed2c1f is 50, key is test_row_0/B:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:06,864 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412030fa42824c326457ebd55c001d7426ab7_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:06,866 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412030fa42824c326457ebd55c001d7426ab7_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:06,867 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412030fa42824c326457ebd55c001d7426ab7_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:06,873 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742463_1639 (size=12697) 2024-12-03T15:23:06,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
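Only column family A goes through DefaultMobStoreFlusher/DefaultMobStoreCompactor and writes files under mobdir, which indicates A is MOB-enabled in this test while B and C are ordinary families flushed by DefaultStoreFlusher. A minimal sketch of declaring such a schema with the 2.x descriptor builders follows; the MOB threshold value is hypothetical and is not taken from the test.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) {
        // Sketch only: family A is MOB-enabled (large values are written to the mobdir
        // seen in the log), B and C are ordinary families. The threshold is hypothetical.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(4 * 1024L) // hypothetical 4 KB MOB threshold
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
            .build();
        System.out.println(td);
    }
}
```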
2024-12-03T15:23:06,878 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:06,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:06,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c8866e70a1b142888b686bf1c8a49465_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239385111/Put/seqid=0 2024-12-03T15:23:06,895 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c98f13d1881946efbe6cdcd79aed2c1f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c98f13d1881946efbe6cdcd79aed2c1f 2024-12-03T15:23:06,900 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into c98f13d1881946efbe6cdcd79aed2c1f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
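The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries in this log refer to the exploring compaction policy's candidate check: a set of store files is only considered if every file is no larger than the compaction ratio times the combined size of the other files in the set. The sketch below illustrates that criterion only, using the default ratio of 1.2 and approximate file sizes from this log; it is not the ExploringCompactionPolicy implementation.

```java
import java.util.List;

// Simplified illustration of the "files in ratio" check used when exploring candidate
// compaction sets: every file must be <= ratio * (combined size of the other files).
// Not the ExploringCompactionPolicy implementation.
class FilesInRatioSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the four B-family HFiles from the log (~12 K each).
        List<Long> candidate = List.of(12_300L, 11_900L, 11_900L, 11_900L);
        System.out.println(filesInRatio(candidate, 1.2)); // default hbase.hstore.compaction.ratio is 1.2
    }
}
```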
2024-12-03T15:23:06,900 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:06,900 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=12, startTime=1733239386829; duration=0sec 2024-12-03T15:23:06,900 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:06,900 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:23:06,900 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-03T15:23:06,902 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-03T15:23:06,902 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction (all files) 2024-12-03T15:23:06,902 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:06,902 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/98b88daba9a14e9a893cdbeeab202b55, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=47.9 K 2024-12-03T15:23:06,902 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 98b88daba9a14e9a893cdbeeab202b55, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733239376965 2024-12-03T15:23:06,903 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 437cbf1c91594d8e9aad2e6a23997eba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1733239380231 2024-12-03T15:23:06,903 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting dec88a85d6a8476da21856d180ab476b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=216, earliestPutTs=1733239382352 2024-12-03T15:23:06,904 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 3107f31b85dc454a9e5310aa8d637059, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:06,930 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#537 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:06,931 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/d65d8c06782e413998395c3458bef580 is 50, key is test_row_0/C:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:06,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742464_1640 (size=4469) 2024-12-03T15:23:06,935 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#534 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:06,935 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/ec631d47428c4692b2df1aab21fbbf6e is 175, key is test_row_0/A:col10/1733239384495/Put/seqid=0 2024-12-03T15:23:06,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742465_1641 (size=12304) 2024-12-03T15:23:06,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:06,952 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203c8866e70a1b142888b686bf1c8a49465_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c8866e70a1b142888b686bf1c8a49465_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:06,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742467_1643 (size=31651) 2024-12-03T15:23:06,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/238892ce4533470c95a4b19c766dcc41, store: 
[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/238892ce4533470c95a4b19c766dcc41 is 175, key is test_row_0/A:col10/1733239385111/Put/seqid=0 2024-12-03T15:23:06,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742466_1642 (size=12697) 2024-12-03T15:23:06,971 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/d65d8c06782e413998395c3458bef580 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/d65d8c06782e413998395c3458bef580 2024-12-03T15:23:06,976 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 365d5514a5bb1b4ce69a418d0f3e9934 into d65d8c06782e413998395c3458bef580(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:23:06,976 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:06,976 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=12, startTime=1733239386829; duration=0sec 2024-12-03T15:23:06,976 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:06,976 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:23:06,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742468_1644 (size=31105) 2024-12-03T15:23:07,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:07,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:07,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239447260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239447260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239447260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239447260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,358 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/ec631d47428c4692b2df1aab21fbbf6e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e 2024-12-03T15:23:07,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239447363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:07,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239447369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:07,369 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into ec631d47428c4692b2df1aab21fbbf6e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
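The repeated RegionTooBusyException warnings here are surfaced to writers through ipc.CallRunner and are normally absorbed by the HBase client's own retry loop. A hypothetical sketch of a caller that handles the exception explicitly with exponential backoff is shown below; the table, row and column names mirror the test, everything else (retry count, delays) is illustrative, and with default client settings the exception may instead arrive wrapped after the client's internal retries.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException busy) {
                    // Region memstore is over its blocking limit; wait for a flush to catch up.
                    // Note: depending on client retry settings, this may instead surface wrapped
                    // in a retries-exhausted exception rather than directly.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```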
2024-12-03T15:23:07,369 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:23:07,369 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=12, startTime=1733239386829; duration=0sec
2024-12-03T15:23:07,369 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-03T15:23:07,369 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A
2024-12-03T15:23:07,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239447369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239447369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,388 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=252, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/238892ce4533470c95a4b19c766dcc41
2024-12-03T15:23:07,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cb96e92e06cd4ad3bdf290e54d776060 is 50, key is test_row_0/B:col10/1733239385111/Put/seqid=0
2024-12-03T15:23:07,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742469_1645 (size=12151)
2024-12-03T15:23:07,412 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cb96e92e06cd4ad3bdf290e54d776060
2024-12-03T15:23:07,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/7291994cf5334afc933da7e15c6e4a5e is 50, key is test_row_0/C:col10/1733239385111/Put/seqid=0
2024-12-03T15:23:07,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742470_1646 (size=12151)
2024-12-03T15:23:07,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239447567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239447570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239447570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239447574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,841 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/7291994cf5334afc933da7e15c6e4a5e
2024-12-03T15:23:07,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/238892ce4533470c95a4b19c766dcc41 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41
2024-12-03T15:23:07,859 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41, entries=150, sequenceid=252, filesize=30.4 K
2024-12-03T15:23:07,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/cb96e92e06cd4ad3bdf290e54d776060 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060
2024-12-03T15:23:07,865 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060, entries=150, sequenceid=252, filesize=11.9 K
2024-12-03T15:23:07,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/7291994cf5334afc933da7e15c6e4a5e as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e
2024-12-03T15:23:07,869 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e, entries=150, sequenceid=252, filesize=11.9 K
2024-12-03T15:23:07,870 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 365d5514a5bb1b4ce69a418d0f3e9934 in 992ms, sequenceid=252, compaction requested=false
2024-12-03T15:23:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934:
2024-12-03T15:23:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.
2024-12-03T15:23:07,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164
2024-12-03T15:23:07,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=164
2024-12-03T15:23:07,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934
2024-12-03T15:23:07,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-12-03T15:23:07,872 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163
2024-12-03T15:23:07,872 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8330 sec
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C
2024-12-03T15:23:07,872 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-03T15:23:07,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.8360 sec
2024-12-03T15:23:07,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203ecbb0c4edf77467fb4459fa8817b2d8e_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239387259/Put/seqid=0
2024-12-03T15:23:07,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742471_1647 (size=14994)
2024-12-03T15:23:07,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239447885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239447889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239447889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239447890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239447991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239447991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239447992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:07,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:07,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239447993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239448193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239448194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239448195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239448196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,282 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-03T15:23:08,284 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203ecbb0c4edf77467fb4459fa8817b2d8e_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ecbb0c4edf77467fb4459fa8817b2d8e_365d5514a5bb1b4ce69a418d0f3e9934
2024-12-03T15:23:08,285 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/1784dc7940bc41c692d25c837364b60a, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934]
2024-12-03T15:23:08,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/1784dc7940bc41c692d25c837364b60a is 175, key is test_row_0/A:col10/1733239387259/Put/seqid=0
2024-12-03T15:23:08,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742472_1648 (size=39949)
2024-12-03T15:23:08,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239448498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239448498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239448498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:08,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239448499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:08,690 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=281, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/1784dc7940bc41c692d25c837364b60a
2024-12-03T15:23:08,696 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/a77a592a204c46278f7f2d21519ab059 is 50, key is test_row_0/B:col10/1733239387259/Put/seqid=0
2024-12-03T15:23:08,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742473_1649 (size=12301)
2024-12-03T15:23:09,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239449002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:09,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-03T15:23:09,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239449002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
2024-12-03T15:23:09,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239449004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:09,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:09,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239449004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:09,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/a77a592a204c46278f7f2d21519ab059 2024-12-03T15:23:09,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/102b3bc79f794c42b5b9a5d23051acf7 is 50, key is test_row_0/C:col10/1733239387259/Put/seqid=0 2024-12-03T15:23:09,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742474_1650 (size=12301) 2024-12-03T15:23:09,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-03T15:23:09,142 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-03T15:23:09,143 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-03T15:23:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-03T15:23:09,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:09,144 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-03T15:23:09,145 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-03T15:23:09,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
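The Mutate rejections above come from HRegion.checkResources, which raises RegionTooBusyException once the region's memstore passes its blocking size (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the 512.0 K limit here suggests the test runs with a deliberately small flush size). The HBase client normally retries this exception on its own, so the Java sketch below only illustrates an explicit application-level backoff, assuming client retries are tuned low enough for the exception to surface; the backoff values are arbitrary, while the table, row and family names are the ones from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                      // arbitrary initial backoff
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);                        // rejected while the memstore is over its blocking limit
              return;                                // write accepted
            } catch (RegionTooBusyException busy) {
              Thread.sleep(backoffMs);               // give the in-flight flush time to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new IllegalStateException("region stayed too busy after 10 attempts");
        }
      }
    }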
2024-12-03T15:23:09,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:09,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:09,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-03T15:23:09,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:09,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:09,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:09,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:09,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:09,449 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:09,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-03T15:23:09,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
2024-12-03T15:23:09,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:09,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-03T15:23:09,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:09,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-03T15:23:09,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=281 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/102b3bc79f794c42b5b9a5d23051acf7 2024-12-03T15:23:09,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/1784dc7940bc41c692d25c837364b60a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a 2024-12-03T15:23:09,515 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a, entries=200, sequenceid=281, filesize=39.0 K 2024-12-03T15:23:09,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/a77a592a204c46278f7f2d21519ab059 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059 2024-12-03T15:23:09,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059, entries=150, sequenceid=281, filesize=12.0 K 2024-12-03T15:23:09,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/102b3bc79f794c42b5b9a5d23051acf7 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7 2024-12-03T15:23:09,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7, entries=150, sequenceid=281, filesize=12.0 K 2024-12-03T15:23:09,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1650ms, sequenceid=281, compaction requested=true 2024-12-03T15:23:09,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:09,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:23:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:23:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:23:09,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102705 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/A is initiating minor compaction (all files) 2024-12-03T15:23:09,523 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/B is initiating minor compaction (all files) 2024-12-03T15:23:09,523 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/A in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,523 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/B in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
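Once the flush lands, MemStoreFlusher marks stores A, B and C for compaction and ExploringCompactionPolicy selects all three store files in each store, matching the default selection minimum of three files (hbase.hstore.compactionThreshold, also known as hbase.hstore.compaction.min). Compactions can also be requested explicitly from a client; the sketch below is purely illustrative and not part of the test run:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.compact(tn, Bytes.toBytes("A"));   // queue a minor compaction for family A only
          admin.majorCompact(tn);                  // or rewrite all store files of every family
        }
      }
    }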
2024-12-03T15:23:09,524 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=100.3 K 2024-12-03T15:23:09,524 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c98f13d1881946efbe6cdcd79aed2c1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=36.3 K 2024-12-03T15:23:09,524 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
files: [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a] 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting c98f13d1881946efbe6cdcd79aed2c1f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec631d47428c4692b2df1aab21fbbf6e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting cb96e92e06cd4ad3bdf290e54d776060, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239385109 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 238892ce4533470c95a4b19c766dcc41, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239385109 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting a77a592a204c46278f7f2d21519ab059, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239387244 2024-12-03T15:23:09,524 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1784dc7940bc41c692d25c837364b60a, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239387244 2024-12-03T15:23:09,531 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:09,533 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241203e938f681d6064a29a9827a59fac7a4cf_365d5514a5bb1b4ce69a418d0f3e9934 store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:09,533 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#B#compaction#544 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:09,533 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c327374fb1c24361846ec95ec9d95a22 is 50, key is test_row_0/B:col10/1733239387259/Put/seqid=0 2024-12-03T15:23:09,534 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241203e938f681d6064a29a9827a59fac7a4cf_365d5514a5bb1b4ce69a418d0f3e9934, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:09,534 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203e938f681d6064a29a9827a59fac7a4cf_365d5514a5bb1b4ce69a418d0f3e9934 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:09,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742475_1651 (size=12949) 2024-12-03T15:23:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742476_1652 (size=4469) 2024-12-03T15:23:09,601 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:09,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46815 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
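Family A of the test table is MOB-enabled, which is why DefaultMobStoreFlusher and DefaultMobStoreCompactor handle it; during this compaction the temporary MOB writer is aborted because the tiny test cells apparently fall below the MOB threshold and yield no MOB cells. As a rough sketch of how such a family is declared (the table name and threshold here are invented for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Cells larger than the MOB threshold are written to separate MOB files under mobdir;
          // smaller cells stay in ordinary HFiles, so a compaction over tiny rows can
          // legitimately end with "no MOB cells" and abort its temporary MOB writer.
          admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("MobDemo"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(100 * 1024L)   // hypothetical 100 KB threshold
                  .build())
              .build());
        }
      }
    }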
2024-12-03T15:23:09,602 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:09,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:09,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203893a43351f434b0c85a341bdf1fef626_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239387874/Put/seqid=0 2024-12-03T15:23:09,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742477_1653 (size=12454) 2024-12-03T15:23:09,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:09,938 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#A#compaction#543 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:09,939 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/70913925122145d4a15d848560dd0551 is 175, key is test_row_0/A:col10/1733239387259/Put/seqid=0 2024-12-03T15:23:09,940 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/c327374fb1c24361846ec95ec9d95a22 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c327374fb1c24361846ec95ec9d95a22 2024-12-03T15:23:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742478_1654 (size=31903) 2024-12-03T15:23:09,945 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/B of 365d5514a5bb1b4ce69a418d0f3e9934 into c327374fb1c24361846ec95ec9d95a22(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-03T15:23:09,945 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:09,945 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/B, priority=13, startTime=1733239389523; duration=0sec 2024-12-03T15:23:09,945 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:09,945 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:23:09,945 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-03T15:23:09,946 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-03T15:23:09,946 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/70913925122145d4a15d848560dd0551 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/70913925122145d4a15d848560dd0551 2024-12-03T15:23:09,946 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1540): 365d5514a5bb1b4ce69a418d0f3e9934/C is initiating minor compaction (all files) 2024-12-03T15:23:09,946 INFO 
[RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 365d5514a5bb1b4ce69a418d0f3e9934/C in TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:09,946 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/d65d8c06782e413998395c3458bef580, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7] into tmpdir=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp, totalSize=36.3 K 2024-12-03T15:23:09,947 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting d65d8c06782e413998395c3458bef580, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1733239384492 2024-12-03T15:23:09,947 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 7291994cf5334afc933da7e15c6e4a5e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733239385109 2024-12-03T15:23:09,947 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] compactions.Compactor(224): Compacting 102b3bc79f794c42b5b9a5d23051acf7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=281, earliestPutTs=1733239387244 2024-12-03T15:23:09,953 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 365d5514a5bb1b4ce69a418d0f3e9934#C#compaction#546 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-03T15:23:09,954 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/08c7fb7422d0484486594befb2dfa171 is 50, key is test_row_0/C:col10/1733239387259/Put/seqid=0 2024-12-03T15:23:09,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/A of 365d5514a5bb1b4ce69a418d0f3e9934 into 70913925122145d4a15d848560dd0551(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
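At this point the B and A compactions have committed their results (single store files of 12.6 K and 31.2 K) while C is still being rewritten. If a client needed to wait for the table to settle, the table-level compaction state can be polled; the loop below is only a sketch and the polling interval is arbitrary:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForCompactions {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          // Poll until no region of the table reports an active minor or major compaction.
          while (admin.getCompactionState(tn) != CompactionState.NONE) {
            Thread.sleep(500);   // arbitrary polling interval
          }
        }
      }
    }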
2024-12-03T15:23:09,955 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:09,955 INFO [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/A, priority=13, startTime=1733239389522; duration=0sec 2024-12-03T15:23:09,956 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:09,956 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:23:09,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742479_1655 (size=12949) 2024-12-03T15:23:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:10,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. as already flushing 2024-12-03T15:23:10,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:10,014 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203893a43351f434b0c85a341bdf1fef626_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203893a43351f434b0c85a341bdf1fef626_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:10,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/6817c42cac964b89aa6017bbd4158fc3, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:10,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/6817c42cac964b89aa6017bbd4158fc3 is 175, key is test_row_0/A:col10/1733239387874/Put/seqid=0 2024-12-03T15:23:10,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742480_1656 (size=31255) 2024-12-03T15:23:10,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239450027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239450028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239450029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239450030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,071 DEBUG [Thread-2551 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:60989 2024-12-03T15:23:10,071 DEBUG [Thread-2551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:10,071 DEBUG [Thread-2545 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f1754bc to 127.0.0.1:60989 2024-12-03T15:23:10,071 DEBUG [Thread-2545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:10,071 DEBUG [Thread-2547 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d9113f3 to 127.0.0.1:60989 2024-12-03T15:23:10,071 DEBUG [Thread-2547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:10,072 DEBUG [Thread-2549 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:60989 2024-12-03T15:23:10,072 DEBUG [Thread-2549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:10,073 DEBUG [Thread-2543 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7846cb78 to 127.0.0.1:60989 2024-12-03T15:23:10,073 DEBUG [Thread-2543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:10,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239450131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239450132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239450132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239450132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:10,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239450333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239450333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239450333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239450334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,371 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/08c7fb7422d0484486594befb2dfa171 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/08c7fb7422d0484486594befb2dfa171 2024-12-03T15:23:10,374 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 365d5514a5bb1b4ce69a418d0f3e9934/C of 365d5514a5bb1b4ce69a418d0f3e9934 into 08c7fb7422d0484486594befb2dfa171(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
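The Mutate calls above keep being rejected with RegionTooBusyException because the region's memstore has hit its blocking ceiling (512.0 K here) faster than flushes can drain it. Below is a minimal client-side sketch, not part of the test code, of how an application could back off and retry such writes; the table, family, qualifier, and row names are assumptions taken from the log.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // rejected while the memstore is over its blocking limit
          return;
        } catch (IOException e) {
          // The server-side RegionTooBusyException ("Over memstore limit") usually
          // reaches the application wrapped in the client's retry exceptions;
          // back off and try again instead of failing the write outright.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IOException("write still rejected after retries");
    }
  }
}

Note that the stock HBase client already treats this exception as retryable, so an explicit loop like this only matters once the client's own retries are exhausted or have been tuned down.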
2024-12-03T15:23:10,374 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:10,374 INFO [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934., storeName=365d5514a5bb1b4ce69a418d0f3e9934/C, priority=13, startTime=1733239389523; duration=0sec 2024-12-03T15:23:10,374 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:10,374 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:23:10,420 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/6817c42cac964b89aa6017bbd4158fc3 2024-12-03T15:23:10,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/5c7f66da3d7248d293195bab931ff93a is 50, key is test_row_0/B:col10/1733239387874/Put/seqid=0 2024-12-03T15:23:10,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742481_1657 (size=12301) 2024-12-03T15:23:10,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239450635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239450635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:10,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239450636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239450636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:10,828 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/5c7f66da3d7248d293195bab931ff93a 2024-12-03T15:23:10,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/038a57574d0c4238a1a3b1d34387e685 is 50, key is test_row_0/C:col10/1733239387874/Put/seqid=0 2024-12-03T15:23:10,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742482_1658 (size=12301) 2024-12-03T15:23:11,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:11,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51588 deadline: 1733239451140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:11,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51618 deadline: 1733239451140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:11,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51614 deadline: 1733239451140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:11,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-03T15:23:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51630 deadline: 1733239451142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:11,236 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/038a57574d0c4238a1a3b1d34387e685 2024-12-03T15:23:11,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/6817c42cac964b89aa6017bbd4158fc3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/6817c42cac964b89aa6017bbd4158fc3 2024-12-03T15:23:11,242 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/6817c42cac964b89aa6017bbd4158fc3, entries=150, sequenceid=291, filesize=30.5 K 2024-12-03T15:23:11,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/5c7f66da3d7248d293195bab931ff93a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/5c7f66da3d7248d293195bab931ff93a 2024-12-03T15:23:11,245 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/5c7f66da3d7248d293195bab931ff93a, entries=150, sequenceid=291, filesize=12.0 K 2024-12-03T15:23:11,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/038a57574d0c4238a1a3b1d34387e685 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/038a57574d0c4238a1a3b1d34387e685 2024-12-03T15:23:11,247 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/038a57574d0c4238a1a3b1d34387e685, entries=150, sequenceid=291, filesize=12.0 K 2024-12-03T15:23:11,248 INFO [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1646ms, sequenceid=291, compaction requested=false 2024-12-03T15:23:11,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:11,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:11,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/2b5ef621a0dd:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-03T15:23:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-03T15:23:11,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-03T15:23:11,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1040 sec 2024-12-03T15:23:11,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.1070 sec 2024-12-03T15:23:11,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46815 {}] regionserver.HRegion(8581): Flush requested on 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:11,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-03T15:23:11,878 DEBUG [Thread-2540 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:60989 2024-12-03T15:23:11,878 DEBUG [Thread-2540 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
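The flush that just completed was driven by the master-side FlushTableProcedure (pid=165), whose progress the client keeps polling ("Checking to see if procedure is done pid=165"). A hedged sketch of the public Admin call that kicks off such a flush follows; only the table name comes from the log, the rest is assumed boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master runs a FlushTableProcedure with one FlushRegionProcedure per
      // region; the client then polls the master until the procedure completes,
      // which is the "Checking to see if procedure is done" traffic in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}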
2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:11,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:11,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f28ed6b74dd745af95041604d0fa26af_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_0/A:col10/1733239390025/Put/seqid=0 2024-12-03T15:23:11,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742483_1659 (size=12454) 2024-12-03T15:23:12,143 DEBUG [Thread-2534 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x103dfc6e to 127.0.0.1:60989 2024-12-03T15:23:12,143 DEBUG [Thread-2534 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:12,143 DEBUG [Thread-2532 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06094c70 to 127.0.0.1:60989 2024-12-03T15:23:12,143 DEBUG [Thread-2532 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:12,148 DEBUG [Thread-2538 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:60989 2024-12-03T15:23:12,148 DEBUG [Thread-2538 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:12,149 DEBUG [Thread-2536 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:60989 2024-12-03T15:23:12,149 DEBUG [Thread-2536 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:12,286 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:12,288 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203f28ed6b74dd745af95041604d0fa26af_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f28ed6b74dd745af95041604d0fa26af_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:12,289 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/a7d30229750e484f970df351a800d6e8, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:12,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/a7d30229750e484f970df351a800d6e8 is 175, key is 
test_row_0/A:col10/1733239390025/Put/seqid=0 2024-12-03T15:23:12,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742484_1660 (size=31255) 2024-12-03T15:23:12,692 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=321, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/a7d30229750e484f970df351a800d6e8 2024-12-03T15:23:12,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/304057fdaaae42eb9e8bc5ee3ae14dab is 50, key is test_row_0/B:col10/1733239390025/Put/seqid=0 2024-12-03T15:23:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742485_1661 (size=12301) 2024-12-03T15:23:13,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/304057fdaaae42eb9e8bc5ee3ae14dab 2024-12-03T15:23:13,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/c58f98d07c9c44b494d02b5381f89bb1 is 50, key is test_row_0/C:col10/1733239390025/Put/seqid=0 2024-12-03T15:23:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742486_1662 (size=12301) 2024-12-03T15:23:13,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-03T15:23:13,249 INFO [Thread-2542 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 17
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6551
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6650
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6287
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6572
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6558
2024-12-03T15:23:13,249 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-03T15:23:13,249 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-03T15:23:13,249 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x537a66f8 to 127.0.0.1:60989
2024-12-03T15:23:13,250 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-03T15:23:13,250 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-03T15:23:13,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-03T15:23:13,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-03T15:23:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-03T15:23:13,252 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239393252"}]},"ts":"1733239393252"}
2024-12-03T15:23:13,253 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-03T15:23:13,255 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-03T15:23:13,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-03T15:23:13,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, UNASSIGN}]
2024-12-03T15:23:13,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=168, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, UNASSIGN
2024-12-03T15:23:13,257 INFO [PEWorker-1
{}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=CLOSING, regionLocation=2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:13,258 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-03T15:23:13,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; CloseRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292}] 2024-12-03T15:23:13,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-03T15:23:13,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:13,410 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(124): Close 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:13,410 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-03T15:23:13,410 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1681): Closing 365d5514a5bb1b4ce69a418d0f3e9934, disabling compactions & flushes 2024-12-03T15:23:13,410 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
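Here the client's disable call has been fanned out by the master into DisableTableProcedure, CloseTableRegionsProcedure, TransitRegionStateProcedure, and CloseRegionProcedure (pids 167 through 170), and the region server begins closing the region. A hedged sketch of the corresponding client call is below; the enabled-state check is an assumption for illustration, not something the test necessarily performs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Marks the table DISABLING in hbase:meta, then unassigns and closes
        // each region, flushing any remaining memstore data on the way out.
        admin.disableTable(table);
      }
    }
  }
}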
2024-12-03T15:23:13,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/c58f98d07c9c44b494d02b5381f89bb1 2024-12-03T15:23:13,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/a7d30229750e484f970df351a800d6e8 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/a7d30229750e484f970df351a800d6e8 2024-12-03T15:23:13,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/a7d30229750e484f970df351a800d6e8, entries=150, sequenceid=321, filesize=30.5 K 2024-12-03T15:23:13,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/304057fdaaae42eb9e8bc5ee3ae14dab as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/304057fdaaae42eb9e8bc5ee3ae14dab 2024-12-03T15:23:13,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/304057fdaaae42eb9e8bc5ee3ae14dab, entries=150, sequenceid=321, filesize=12.0 K 2024-12-03T15:23:13,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/c58f98d07c9c44b494d02b5381f89bb1 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/c58f98d07c9c44b494d02b5381f89bb1 2024-12-03T15:23:13,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/c58f98d07c9c44b494d02b5381f89bb1, entries=150, sequenceid=321, filesize=12.0 K 2024-12-03T15:23:13,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=26.84 KB/27480 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1641ms, sequenceid=321, compaction requested=true 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:13,520 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
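The region has been flushing back-to-back in this section because the test runs with a deliberately tiny memstore; the recurring 512 K "Over memstore limit" ceiling is, by the usual formula, the per-region flush size multiplied by the block multiplier. A sketch of the two settings involved is below; the 128 K value is an assumption about this mini-cluster's configuration (production defaults are far larger, 128 MB times a multiplier of 4), not something read from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a single region's memstore is flushed to disk
    // (assumed small value to mimic this test's behaviour).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writes are blocked with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier, i.e. 4 * 128 K = 512 K with these values.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + blocking + " bytes");
  }
}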
2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. after waiting 0 ms 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:A, priority=-2147483648, current under compaction store size is 1 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:B, priority=-2147483648, current under compaction store size is 2 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. because compaction request was cancelled 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:A 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 365d5514a5bb1b4ce69a418d0f3e9934:C, priority=-2147483648, current under compaction store size is 3 2024-12-03T15:23:13,520 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(2837): Flushing 365d5514a5bb1b4ce69a418d0f3e9934 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-03T15:23:13,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. because compaction request was cancelled 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 
because compaction request was cancelled 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:C 2024-12-03T15:23:13,520 DEBUG [RS:0;2b5ef621a0dd:46815-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 365d5514a5bb1b4ce69a418d0f3e9934:B 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=A 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=B 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 365d5514a5bb1b4ce69a418d0f3e9934, store=C 2024-12-03T15:23:13,520 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-03T15:23:13,524 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203abaec099e42b4a2085d7ca0a9bc46ac7_365d5514a5bb1b4ce69a418d0f3e9934 is 50, key is test_row_1/A:col10/1733239392148/Put/seqid=0 2024-12-03T15:23:13,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742487_1663 (size=9914) 2024-12-03T15:23:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-03T15:23:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-03T15:23:13,927 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-03T15:23:13,929 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241203abaec099e42b4a2085d7ca0a9bc46ac7_365d5514a5bb1b4ce69a418d0f3e9934 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203abaec099e42b4a2085d7ca0a9bc46ac7_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:13,930 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/713ed0cce26f4c7e95fe1b6c5e2aee34, store: [table=TestAcidGuarantees family=A region=365d5514a5bb1b4ce69a418d0f3e9934] 2024-12-03T15:23:13,930 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/713ed0cce26f4c7e95fe1b6c5e2aee34 is 175, key is test_row_1/A:col10/1733239392148/Put/seqid=0 2024-12-03T15:23:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742488_1664 (size=22561) 2024-12-03T15:23:14,334 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/713ed0cce26f4c7e95fe1b6c5e2aee34 2024-12-03T15:23:14,338 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/8f1409cd375044bbbb15468a65fe536f is 50, key is test_row_1/B:col10/1733239392148/Put/seqid=0 2024-12-03T15:23:14,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742489_1665 (size=9857) 2024-12-03T15:23:14,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-03T15:23:14,741 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/8f1409cd375044bbbb15468a65fe536f 2024-12-03T15:23:14,746 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ae088f80ad9d4993affddf81ff4ca34a is 50, key is test_row_1/C:col10/1733239392148/Put/seqid=0 2024-12-03T15:23:14,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742490_1666 (size=9857) 2024-12-03T15:23:15,149 INFO 
[RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ae088f80ad9d4993affddf81ff4ca34a 2024-12-03T15:23:15,152 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/A/713ed0cce26f4c7e95fe1b6c5e2aee34 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/713ed0cce26f4c7e95fe1b6c5e2aee34 2024-12-03T15:23:15,154 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/713ed0cce26f4c7e95fe1b6c5e2aee34, entries=100, sequenceid=328, filesize=22.0 K 2024-12-03T15:23:15,155 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/B/8f1409cd375044bbbb15468a65fe536f as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/8f1409cd375044bbbb15468a65fe536f 2024-12-03T15:23:15,157 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/8f1409cd375044bbbb15468a65fe536f, entries=100, sequenceid=328, filesize=9.6 K 2024-12-03T15:23:15,157 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/.tmp/C/ae088f80ad9d4993affddf81ff4ca34a as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ae088f80ad9d4993affddf81ff4ca34a 2024-12-03T15:23:15,160 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ae088f80ad9d4993affddf81ff4ca34a, entries=100, sequenceid=328, filesize=9.6 K 2024-12-03T15:23:15,160 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 365d5514a5bb1b4ce69a418d0f3e9934 in 1640ms, sequenceid=328, compaction requested=true 2024-12-03T15:23:15,161 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a] to archive 2024-12-03T15:23:15,161 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T15:23:15,162 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/38d631c324374dedaeeb124def735ba2 2024-12-03T15:23:15,163 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/60364f25e1054e73986ee10f0ad6f5d9 2024-12-03T15:23:15,164 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/503b4cff08e243288dd7332d78bbbc2d 2024-12-03T15:23:15,165 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/dfc386aa5ec548a89dc1f0c7534b5f44 2024-12-03T15:23:15,166 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2ac3cafd7c914787a985b04e810a810c 2024-12-03T15:23:15,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/c1e4b9a8bfbc4ffb9c303732e5ce3a60 2024-12-03T15:23:15,167 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/0e0ce537095e44a59ab8184a1fef7c44 2024-12-03T15:23:15,168 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/d9fb26d871d940268a5af641b7351e77 2024-12-03T15:23:15,169 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e23957a295704dc6990c1d5c7c58e172 2024-12-03T15:23:15,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/f25a33c1d9ca4a53868b272e163676aa 2024-12-03T15:23:15,170 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/bf0713e8d5d7449ebe5fe90432e06d9e 2024-12-03T15:23:15,171 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/85e7fd117998400f9b11f0ecdc482be3 2024-12-03T15:23:15,172 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/e76b7f089a674f6ea7a57d4e165ddb02 2024-12-03T15:23:15,172 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/901a6f08c01c42f3b790356cf6ef090e 2024-12-03T15:23:15,173 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/2277b0aaa8fc4f6e815d5451e6d48a0b 2024-12-03T15:23:15,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/ec631d47428c4692b2df1aab21fbbf6e 2024-12-03T15:23:15,175 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/693d43288c074c468571be115c7c7840 2024-12-03T15:23:15,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/238892ce4533470c95a4b19c766dcc41 2024-12-03T15:23:15,176 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/1784dc7940bc41c692d25c837364b60a 2024-12-03T15:23:15,177 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/85302b7c0f44456dab93a28f514fb27b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/0e7503e847174867abc634f4c2142ca1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2191b64233fc43c19a4c2a33dab3809d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c7b8b29374744e91af3858a0a916cd33, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c98f13d1881946efbe6cdcd79aed2c1f, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059] to archive 2024-12-03T15:23:15,178 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T15:23:15,179 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cdfc52f310bc410aa7f59f806eec3f1f 2024-12-03T15:23:15,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/afa3aba9c2ac4fcfb89a0b37d1b16f39 2024-12-03T15:23:15,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/85302b7c0f44456dab93a28f514fb27b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/85302b7c0f44456dab93a28f514fb27b 2024-12-03T15:23:15,181 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6d82bcf54ad54dd99cd93dde6b45d559 2024-12-03T15:23:15,182 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d341e3098b78459b991a075e8db662fe 2024-12-03T15:23:15,183 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/0e7503e847174867abc634f4c2142ca1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/0e7503e847174867abc634f4c2142ca1 2024-12-03T15:23:15,183 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2ca2bdf14c4a41b9ab92caf4551fa1aa 2024-12-03T15:23:15,184 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/043f3b299472450cbe2182485e2c4b42 2024-12-03T15:23:15,185 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2191b64233fc43c19a4c2a33dab3809d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/2191b64233fc43c19a4c2a33dab3809d 2024-12-03T15:23:15,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/1f3127b6724e4d73ba2aa50398093fc8 2024-12-03T15:23:15,186 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/9586fbc2c92747b683dc6f43a1b7e9eb 2024-12-03T15:23:15,187 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c7b8b29374744e91af3858a0a916cd33 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c7b8b29374744e91af3858a0a916cd33 2024-12-03T15:23:15,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/459fc0d879e542929588a690349eb780 2024-12-03T15:23:15,188 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/6e78c376b8954caaaca68e69bf47c73a 2024-12-03T15:23:15,189 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/d95d4a415d814191910317c199d47a69 2024-12-03T15:23:15,190 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c98f13d1881946efbe6cdcd79aed2c1f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c98f13d1881946efbe6cdcd79aed2c1f 2024-12-03T15:23:15,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/684dcd1fb33d46289a7157aa29b96260 2024-12-03T15:23:15,191 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/cb96e92e06cd4ad3bdf290e54d776060 2024-12-03T15:23:15,192 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/a77a592a204c46278f7f2d21519ab059 2024-12-03T15:23:15,193 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/351e223d6a3c430ab4465457b5f84630, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/badf67c079234d21b1ea7241d92f654e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/878fe19176ce41b1b3cfb7ff291ab1c1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/98b88daba9a14e9a893cdbeeab202b55, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/d65d8c06782e413998395c3458bef580, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7] to archive 2024-12-03T15:23:15,193 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-03T15:23:15,194 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/972d3dbfbda14b7bbccedb391f0c527e 2024-12-03T15:23:15,195 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dc2f7f1b5bd94e20ab4eac8b5a8fe665 2024-12-03T15:23:15,196 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/351e223d6a3c430ab4465457b5f84630 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/351e223d6a3c430ab4465457b5f84630 2024-12-03T15:23:15,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f40d261b8d74481aba2b73c65a34b332 2024-12-03T15:23:15,197 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/b2ba7c3ae72249b7b89302e0b318594d 2024-12-03T15:23:15,198 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/badf67c079234d21b1ea7241d92f654e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/badf67c079234d21b1ea7241d92f654e 2024-12-03T15:23:15,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fa54485d66334615b093421ba9ce44b7 2024-12-03T15:23:15,199 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/fd278a6c3d28461099bb0679025532b3 2024-12-03T15:23:15,200 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/878fe19176ce41b1b3cfb7ff291ab1c1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/878fe19176ce41b1b3cfb7ff291ab1c1 2024-12-03T15:23:15,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/f6a5843c8b54467a94dbedb32a924ed1 2024-12-03T15:23:15,201 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ef7bf78c174c425188b2d703d597c233 2024-12-03T15:23:15,202 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/98b88daba9a14e9a893cdbeeab202b55 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/98b88daba9a14e9a893cdbeeab202b55 2024-12-03T15:23:15,203 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/afd17a064567420b856e95e6d4f08ffd 2024-12-03T15:23:15,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/437cbf1c91594d8e9aad2e6a23997eba 2024-12-03T15:23:15,204 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/dec88a85d6a8476da21856d180ab476b 2024-12-03T15:23:15,205 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/d65d8c06782e413998395c3458bef580 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/d65d8c06782e413998395c3458bef580 2024-12-03T15:23:15,206 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/3107f31b85dc454a9e5310aa8d637059 2024-12-03T15:23:15,207 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/7291994cf5334afc933da7e15c6e4a5e 2024-12-03T15:23:15,207 DEBUG [StoreCloser-TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/102b3bc79f794c42b5b9a5d23051acf7 2024-12-03T15:23:15,211 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits/331.seqid, newMaxSeqId=331, maxSeqId=4 2024-12-03T15:23:15,211 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934. 2024-12-03T15:23:15,211 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1635): Region close journal for 365d5514a5bb1b4ce69a418d0f3e9934: 2024-12-03T15:23:15,212 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(170): Closed 365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,213 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=169 updating hbase:meta row=365d5514a5bb1b4ce69a418d0f3e9934, regionState=CLOSED 2024-12-03T15:23:15,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-03T15:23:15,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseRegionProcedure 365d5514a5bb1b4ce69a418d0f3e9934, server=2b5ef621a0dd,46815,1733239226292 in 1.9550 sec 2024-12-03T15:23:15,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-03T15:23:15,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=365d5514a5bb1b4ce69a418d0f3e9934, UNASSIGN in 1.9580 sec 2024-12-03T15:23:15,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-03T15:23:15,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9600 sec 2024-12-03T15:23:15,217 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733239395217"}]},"ts":"1733239395217"} 
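For readers following the trace: the entries above and below record the master finishing DisableTableProcedure (pid=167) and then running DeleteTableProcedure (pid=171) for TestAcidGuarantees, while the client ("Time-limited test") polls for procedure completion. The sketch below shows the client-side Admin calls that would drive this same sequence; it is illustrative only, not the test harness's actual code. The table name is taken from the log, while the configuration/connection setup and the class name DropTestTable are assumptions for the example.

```java
// Minimal sketch of the client-side calls behind the DISABLE (pid=167) and
// DELETE (pid=171) operations seen in this log. Assumes an hbase-site.xml on
// the classpath pointing at the cluster; class/variable names are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();        // assumed cluster configuration
    TableName tn = TableName.valueOf("TestAcidGuarantees");  // table name taken from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(tn)) {
        if (!admin.isTableDisabled(tn)) {
          // Submits a DisableTableProcedure on the master and blocks until it
          // completes; the repeated "Checking to see if procedure is done"
          // lines are the server side of the client's completion polling.
          admin.disableTable(tn);
        }
        // Submits a DeleteTableProcedure; as the following entries show, the
        // region directories are archived (HFileArchiver) before the table is
        // removed from hbase:meta.
        admin.deleteTable(tn);
      }
    }
  }
}
```

After disableTable returns (the "Operation: DISABLE ... procId: 167 completed" line below), deleteTable triggers the DELETE_TABLE_PRE_OPERATION / DELETE_TABLE_CLEAR_FS_LAYOUT states and the HFileArchiver activity that follows.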
2024-12-03T15:23:15,218 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-03T15:23:15,220 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-03T15:23:15,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9700 sec 2024-12-03T15:23:15,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-03T15:23:15,355 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-03T15:23:15,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-03T15:23:15,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,357 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-03T15:23:15,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,360 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,361 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C, FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits] 2024-12-03T15:23:15,363 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/6817c42cac964b89aa6017bbd4158fc3 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/6817c42cac964b89aa6017bbd4158fc3 2024-12-03T15:23:15,364 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/70913925122145d4a15d848560dd0551 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/70913925122145d4a15d848560dd0551 2024-12-03T15:23:15,365 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/713ed0cce26f4c7e95fe1b6c5e2aee34 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/713ed0cce26f4c7e95fe1b6c5e2aee34 2024-12-03T15:23:15,366 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/a7d30229750e484f970df351a800d6e8 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/A/a7d30229750e484f970df351a800d6e8 2024-12-03T15:23:15,368 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/304057fdaaae42eb9e8bc5ee3ae14dab to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/304057fdaaae42eb9e8bc5ee3ae14dab 2024-12-03T15:23:15,368 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/5c7f66da3d7248d293195bab931ff93a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/5c7f66da3d7248d293195bab931ff93a 2024-12-03T15:23:15,369 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/8f1409cd375044bbbb15468a65fe536f to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/8f1409cd375044bbbb15468a65fe536f 2024-12-03T15:23:15,370 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c327374fb1c24361846ec95ec9d95a22 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/B/c327374fb1c24361846ec95ec9d95a22 2024-12-03T15:23:15,372 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/038a57574d0c4238a1a3b1d34387e685 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/038a57574d0c4238a1a3b1d34387e685 2024-12-03T15:23:15,372 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/08c7fb7422d0484486594befb2dfa171 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/08c7fb7422d0484486594befb2dfa171 2024-12-03T15:23:15,373 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ae088f80ad9d4993affddf81ff4ca34a to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/ae088f80ad9d4993affddf81ff4ca34a 2024-12-03T15:23:15,374 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/c58f98d07c9c44b494d02b5381f89bb1 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/C/c58f98d07c9c44b494d02b5381f89bb1 2024-12-03T15:23:15,376 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits/331.seqid to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934/recovered.edits/331.seqid 2024-12-03T15:23:15,377 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/default/TestAcidGuarantees/365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,377 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-03T15:23:15,377 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:23:15,378 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-03T15:23:15,380 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120303fb417030174eb0a648175542f45980_365d5514a5bb1b4ce69a418d0f3e9934 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120303fb417030174eb0a648175542f45980_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,380 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030e7806a8112d49929d6c3859d750337c_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412030e7806a8112d49929d6c3859d750337c_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,381 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412031824e1a7938d4ed4af1b0be93a5e1648_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412031824e1a7938d4ed4af1b0be93a5e1648_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,382 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037ea0ae084b6744b78355d6879faec200_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412037ea0ae084b6744b78355d6879faec200_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,383 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120388682fdd3c2f47eb8257c1f77f42af2e_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120388682fdd3c2f47eb8257c1f77f42af2e_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,384 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203893a43351f434b0c85a341bdf1fef626_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203893a43351f434b0c85a341bdf1fef626_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,385 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038ed018eba3e94ceb94c7da21941ca698_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412038ed018eba3e94ceb94c7da21941ca698_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,386 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397264fa80af747d3bec3613bfba6aef5_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120397264fa80af747d3bec3613bfba6aef5_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,386 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203abaec099e42b4a2085d7ca0a9bc46ac7_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203abaec099e42b4a2085d7ca0a9bc46ac7_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,387 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203b16b1030d4174524ad8317391f832ea4_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203b16b1030d4174524ad8317391f832ea4_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,388 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bc07c3a494e044048e07dec585f64dad_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203bc07c3a494e044048e07dec585f64dad_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,389 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c21f0b322c7543df9134e982a0d36452_365d5514a5bb1b4ce69a418d0f3e9934 to 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c21f0b322c7543df9134e982a0d36452_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,390 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c8866e70a1b142888b686bf1c8a49465_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203c8866e70a1b142888b686bf1c8a49465_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,391 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203e94703ff41ec4b4d90ff80053c95ad93_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203e94703ff41ec4b4d90ff80053c95ad93_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,391 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ecbb0c4edf77467fb4459fa8817b2d8e_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203ecbb0c4edf77467fb4459fa8817b2d8e_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,392 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f28ed6b74dd745af95041604d0fa26af_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f28ed6b74dd745af95041604d0fa26af_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,393 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f6126583e3314b499b914e7f067288a8_365d5514a5bb1b4ce69a418d0f3e9934 to hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241203f6126583e3314b499b914e7f067288a8_365d5514a5bb1b4ce69a418d0f3e9934 2024-12-03T15:23:15,393 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-03T15:23:15,395 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,397 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-03T15:23:15,398 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-03T15:23:15,399 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,399 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-03T15:23:15,399 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733239395399"}]},"ts":"9223372036854775807"} 2024-12-03T15:23:15,400 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-03T15:23:15,400 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 365d5514a5bb1b4ce69a418d0f3e9934, NAME => 'TestAcidGuarantees,,1733239367991.365d5514a5bb1b4ce69a418d0f3e9934.', STARTKEY => '', ENDKEY => ''}] 2024-12-03T15:23:15,401 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-03T15:23:15,401 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733239395401"}]},"ts":"9223372036854775807"} 2024-12-03T15:23:15,402 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-03T15:23:15,403 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-03T15:23:15,404 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 47 msec 2024-12-03T15:23:15,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36539 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-03T15:23:15,459 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-03T15:23:15,468 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241 (was 241), OpenFileDescriptor=459 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=783 (was 853), ProcessCount=9 (was 11), AvailableMemoryMB=2079 (was 1739) - AvailableMemoryMB LEAK? 
- 2024-12-03T15:23:15,468 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-03T15:23:15,468 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-03T15:23:15,469 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:60989 2024-12-03T15:23:15,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:15,469 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-03T15:23:15,469 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1610069381, stopped=false 2024-12-03T15:23:15,469 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=2b5ef621a0dd,36539,1733239225577 2024-12-03T15:23:15,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T15:23:15,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-03T15:23:15,471 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-03T15:23:15,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:23:15,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:23:15,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:15,471 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '2b5ef621a0dd,46815,1733239226292' ***** 2024-12-03T15:23:15,471 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-03T15:23:15,472 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-03T15:23:15,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T15:23:15,472 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-03T15:23:15,472 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-03T15:23:15,472 INFO [RS:0;2b5ef621a0dd:46815 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-03T15:23:15,472 INFO [RS:0;2b5ef621a0dd:46815 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
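The entries above are the tear-down half of TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity: DisableTableProcedure (pid=167) moves TestAcidGuarantees to DISABLED, DeleteTableProcedure (pid=171) archives every store file and MOB file from data/ and mobdir/ into the mirror-image archive/ tree, deletes the region directory, purges the table's rows and state from hbase:meta, and then the test begins shutting the minicluster down. Purely as a hedged illustration (class, method, and variable names below are assumptions, not taken from the test source), the client-side calls that produce the "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines look roughly like this:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class TableTeardownSketch {
  /**
   * Disable and delete a table on a running mini cluster. Each call blocks until the
   * corresponding master procedure (DisableTableProcedure / DeleteTableProcedure)
   * reports completion, which is what the "procId: ... completed" log lines record.
   */
  public static void dropTable(HBaseTestingUtility testUtil, String name) throws Exception {
    TableName table = TableName.valueOf(name); // e.g. "TestAcidGuarantees"
    try (Admin admin = testUtil.getConnection().getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // runs DisableTableProcedure on the master
        }
        admin.deleteTable(table);    // runs DeleteTableProcedure: archive store files, clean hbase:meta
      }
    }
  }
}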
2024-12-03T15:23:15,472 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(3579): Received CLOSE for 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1224): stopping server 2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:15,473 DEBUG [RS:0;2b5ef621a0dd:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 31c39c5a8622ff80b89b6cf13dfade9c, disabling compactions & flushes 2024-12-03T15:23:15,473 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:23:15,473 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. after waiting 0 ms 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 
2024-12-03T15:23:15,473 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1603): Online Regions={31c39c5a8622ff80b89b6cf13dfade9c=hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c., 1588230740=hbase:meta,,1.1588230740} 2024-12-03T15:23:15,473 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 31c39c5a8622ff80b89b6cf13dfade9c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-03T15:23:15,473 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-03T15:23:15,473 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-03T15:23:15,473 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-03T15:23:15,474 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:23:15,489 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/.tmp/info/f9b8101fc9e34bbf90387d2133b3afd0 is 45, key is default/info:d/1733239230124/Put/seqid=0 2024-12-03T15:23:15,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742491_1667 (size=5037) 2024-12-03T15:23:15,493 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/info/f9ed82d94027420987069cbbdf8d8860 is 143, key is hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c./info:regioninfo/1733239230004/Put/seqid=0 2024-12-03T15:23:15,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742492_1668 (size=7725) 2024-12-03T15:23:15,499 INFO [regionserver/2b5ef621a0dd:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T15:23:15,674 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:23:15,874 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 31c39c5a8622ff80b89b6cf13dfade9c 2024-12-03T15:23:15,892 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/.tmp/info/f9b8101fc9e34bbf90387d2133b3afd0 2024-12-03T15:23:15,895 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/.tmp/info/f9b8101fc9e34bbf90387d2133b3afd0 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/info/f9b8101fc9e34bbf90387d2133b3afd0 2024-12-03T15:23:15,897 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/info/f9ed82d94027420987069cbbdf8d8860 2024-12-03T15:23:15,898 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/info/f9b8101fc9e34bbf90387d2133b3afd0, entries=2, sequenceid=6, filesize=4.9 K 2024-12-03T15:23:15,899 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 31c39c5a8622ff80b89b6cf13dfade9c in 426ms, sequenceid=6, compaction requested=false 2024-12-03T15:23:15,902 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/namespace/31c39c5a8622ff80b89b6cf13dfade9c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-03T15:23:15,902 INFO [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 2024-12-03T15:23:15,903 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 31c39c5a8622ff80b89b6cf13dfade9c: 2024-12-03T15:23:15,903 DEBUG [RS_CLOSE_REGION-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733239229452.31c39c5a8622ff80b89b6cf13dfade9c. 
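For context on the flush entries above: closing hbase:namespace forces its single memstore (78 B, one column family) out to an HFile under .tmp, commits that file into the info store, and records the final sequence id in recovered.edits/9.seqid before the region is reported closed. The same memstore-flush machinery can also be requested explicitly from a client; a minimal sketch, assuming an already-open Connection (nothing here is taken from the test itself):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ExplicitFlushSketch {
  /** Ask the cluster to flush a table's memstores out to HFiles. */
  public static void flushTable(Connection connection, String name) throws Exception {
    try (Admin admin = connection.getAdmin()) {
      admin.flush(TableName.valueOf(name)); // e.g. "hbase:namespace"
    }
  }
}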
2024-12-03T15:23:15,915 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/rep_barrier/c9ee38fd16d247e9981bdec85bc97533 is 102, key is TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f./rep_barrier:/1733239255648/DeleteFamily/seqid=0 2024-12-03T15:23:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742493_1669 (size=6025) 2024-12-03T15:23:16,053 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-03T15:23:16,054 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-03T15:23:16,074 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-03T15:23:16,275 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-03T15:23:16,318 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/rep_barrier/c9ee38fd16d247e9981bdec85bc97533 2024-12-03T15:23:16,337 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/table/94cf4f4a44a34d5b8f38573408ce89c3 is 96, key is TestAcidGuarantees,,1733239230370.267a7e743c7c4973345ceaeae71cae1f./table:/1733239255648/DeleteFamily/seqid=0 2024-12-03T15:23:16,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742494_1670 (size=5942) 2024-12-03T15:23:16,445 INFO [regionserver/2b5ef621a0dd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-03T15:23:16,445 INFO [regionserver/2b5ef621a0dd:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-03T15:23:16,475 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-03T15:23:16,475 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-03T15:23:16,475 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-03T15:23:16,675 DEBUG [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-03T15:23:16,740 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/table/94cf4f4a44a34d5b8f38573408ce89c3 2024-12-03T15:23:16,744 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/info/f9ed82d94027420987069cbbdf8d8860 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/info/f9ed82d94027420987069cbbdf8d8860 2024-12-03T15:23:16,746 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/info/f9ed82d94027420987069cbbdf8d8860, entries=22, sequenceid=93, filesize=7.5 K 2024-12-03T15:23:16,747 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/rep_barrier/c9ee38fd16d247e9981bdec85bc97533 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/rep_barrier/c9ee38fd16d247e9981bdec85bc97533 2024-12-03T15:23:16,749 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/rep_barrier/c9ee38fd16d247e9981bdec85bc97533, entries=6, sequenceid=93, filesize=5.9 K 2024-12-03T15:23:16,750 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/.tmp/table/94cf4f4a44a34d5b8f38573408ce89c3 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/table/94cf4f4a44a34d5b8f38573408ce89c3 2024-12-03T15:23:16,752 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/table/94cf4f4a44a34d5b8f38573408ce89c3, entries=9, sequenceid=93, filesize=5.8 K 2024-12-03T15:23:16,753 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1280ms, sequenceid=93, compaction requested=false 2024-12-03T15:23:16,756 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-03T15:23:16,756 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-03T15:23:16,756 INFO [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-03T15:23:16,756 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-03T15:23:16,757 DEBUG [RS_CLOSE_META-regionserver/2b5ef621a0dd:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): 
Closed hbase:meta,,1.1588230740 2024-12-03T15:23:16,875 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1250): stopping server 2b5ef621a0dd,46815,1733239226292; all regions closed. 2024-12-03T15:23:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741834_1010 (size=26050) 2024-12-03T15:23:16,881 DEBUG [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/oldWALs 2024-12-03T15:23:16,881 INFO [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 2b5ef621a0dd%2C46815%2C1733239226292.meta:.meta(num 1733239228985) 2024-12-03T15:23:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741833_1009 (size=15407997) 2024-12-03T15:23:16,884 DEBUG [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/oldWALs 2024-12-03T15:23:16,884 INFO [RS:0;2b5ef621a0dd:46815 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 2b5ef621a0dd%2C46815%2C1733239226292:(num 1733239228579) 2024-12-03T15:23:16,884 DEBUG [RS:0;2b5ef621a0dd:46815 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:16,884 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.LeaseManager(133): Closed leases 2024-12-03T15:23:16,884 INFO [RS:0;2b5ef621a0dd:46815 {}] hbase.ChoreService(370): Chore service for: regionserver/2b5ef621a0dd:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-03T15:23:16,884 INFO [regionserver/2b5ef621a0dd:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
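At this point the region server's WALs (the .meta WAL and the default WAL) have been closed and moved into the shared oldWALs directory under the test data root. Purely as an illustration, with the NameNode address and paths copied from the log and the assumption that the cluster is still reachable, the archived WALs could be inspected with the plain Hadoop FileSystem API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OldWalsListing {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:44673"); // NameNode seen throughout this log
    Path oldWals = new Path("/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/oldWALs");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus status : fs.listStatus(oldWals)) {
        System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
      }
    }
  }
}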
2024-12-03T15:23:16,885 INFO [RS:0;2b5ef621a0dd:46815 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46815 2024-12-03T15:23:16,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-03T15:23:16,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/2b5ef621a0dd,46815,1733239226292 2024-12-03T15:23:16,890 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [2b5ef621a0dd,46815,1733239226292] 2024-12-03T15:23:16,890 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 2b5ef621a0dd,46815,1733239226292; numProcessing=1 2024-12-03T15:23:16,891 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/2b5ef621a0dd,46815,1733239226292 already deleted, retry=false 2024-12-03T15:23:16,891 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 2b5ef621a0dd,46815,1733239226292 expired; onlineServers=0 2024-12-03T15:23:16,891 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '2b5ef621a0dd,36539,1733239225577' ***** 2024-12-03T15:23:16,891 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-03T15:23:16,892 DEBUG [M:0;2b5ef621a0dd:36539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d237f2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=2b5ef621a0dd/172.17.0.2:0 2024-12-03T15:23:16,892 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionServer(1224): stopping server 2b5ef621a0dd,36539,1733239225577 2024-12-03T15:23:16,892 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionServer(1250): stopping server 2b5ef621a0dd,36539,1733239225577; all regions closed. 2024-12-03T15:23:16,892 DEBUG [M:0;2b5ef621a0dd:36539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-03T15:23:16,892 DEBUG [M:0;2b5ef621a0dd:36539 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-03T15:23:16,892 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-03T15:23:16,892 DEBUG [M:0;2b5ef621a0dd:36539 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-03T15:23:16,892 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.large.0-1733239228216 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.large.0-1733239228216,5,FailOnTimeoutGroup] 2024-12-03T15:23:16,892 DEBUG [master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.small.0-1733239228218 {}] cleaner.HFileCleaner(306): Exit Thread[master/2b5ef621a0dd:0:becomeActiveMaster-HFileCleaner.small.0-1733239228218,5,FailOnTimeoutGroup] 2024-12-03T15:23:16,892 INFO [M:0;2b5ef621a0dd:36539 {}] hbase.ChoreService(370): Chore service for: master/2b5ef621a0dd:0 had [] on shutdown 2024-12-03T15:23:16,892 DEBUG [M:0;2b5ef621a0dd:36539 {}] master.HMaster(1733): Stopping service threads 2024-12-03T15:23:16,892 INFO [M:0;2b5ef621a0dd:36539 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-03T15:23:16,893 ERROR [M:0;2b5ef621a0dd:36539 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:44673 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:44673,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-03T15:23:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-03T15:23:16,893 INFO [M:0;2b5ef621a0dd:36539 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-03T15:23:16,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-03T15:23:16,893 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-03T15:23:16,894 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-03T15:23:16,894 DEBUG [M:0;2b5ef621a0dd:36539 {}] zookeeper.ZKUtil(347): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-03T15:23:16,894 WARN [M:0;2b5ef621a0dd:36539 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-03T15:23:16,894 INFO [M:0;2b5ef621a0dd:36539 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-03T15:23:16,894 INFO [M:0;2b5ef621a0dd:36539 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-03T15:23:16,894 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-03T15:23:16,894 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:23:16,894 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:23:16,894 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-03T15:23:16,894 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:23:16,894 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=778.50 KB heapSize=958.34 KB 2024-12-03T15:23:16,909 DEBUG [M:0;2b5ef621a0dd:36539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2774181f922403fba0f7184e5b2a82a is 82, key is hbase:meta,,1/info:regioninfo/1733239229209/Put/seqid=0 2024-12-03T15:23:16,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742495_1671 (size=5672) 2024-12-03T15:23:16,990 INFO [RS:0;2b5ef621a0dd:46815 {}] regionserver.HRegionServer(1307): Exiting; stopping=2b5ef621a0dd,46815,1733239226292; zookeeper connection closed. 
2024-12-03T15:23:16,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T15:23:16,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46815-0x1009f6fd8b40001, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T15:23:16,990 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@d103c42 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@d103c42 2024-12-03T15:23:16,991 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-03T15:23:17,312 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2225 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2774181f922403fba0f7184e5b2a82a 2024-12-03T15:23:17,332 DEBUG [M:0;2b5ef621a0dd:36539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5005f1934119b1b4400f3258b3dd is 2283, key is \x00\x00\x00\x00\x00\x00\x00"/proc:d/1733239259060/Put/seqid=0 2024-12-03T15:23:17,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742496_1672 (size=43657) 2024-12-03T15:23:17,735 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=777.94 KB at sequenceid=2225 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5005f1934119b1b4400f3258b3dd 2024-12-03T15:23:17,738 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25cc5005f1934119b1b4400f3258b3dd 2024-12-03T15:23:17,753 DEBUG [M:0;2b5ef621a0dd:36539 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ba362bb09c1448c82fc5c4de8dbbe03 is 69, key is 2b5ef621a0dd,46815,1733239226292/rs:state/1733239228268/Put/seqid=0 2024-12-03T15:23:17,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073742497_1673 (size=5156) 2024-12-03T15:23:18,156 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2225 (bloomFilter=true), to=hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ba362bb09c1448c82fc5c4de8dbbe03 2024-12-03T15:23:18,159 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2774181f922403fba0f7184e5b2a82a as 
hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2774181f922403fba0f7184e5b2a82a 2024-12-03T15:23:18,162 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2774181f922403fba0f7184e5b2a82a, entries=8, sequenceid=2225, filesize=5.5 K 2024-12-03T15:23:18,162 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5005f1934119b1b4400f3258b3dd as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25cc5005f1934119b1b4400f3258b3dd 2024-12-03T15:23:18,164 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 25cc5005f1934119b1b4400f3258b3dd 2024-12-03T15:23:18,165 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25cc5005f1934119b1b4400f3258b3dd, entries=171, sequenceid=2225, filesize=42.6 K 2024-12-03T15:23:18,165 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ba362bb09c1448c82fc5c4de8dbbe03 as hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ba362bb09c1448c82fc5c4de8dbbe03 2024-12-03T15:23:18,168 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:44673/user/jenkins/test-data/855efb49-8e16-4415-584c-9170d34e1411/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ba362bb09c1448c82fc5c4de8dbbe03, entries=1, sequenceid=2225, filesize=5.0 K 2024-12-03T15:23:18,168 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(3040): Finished flush of dataSize ~778.50 KB/797182, heapSize ~958.04 KB/981032, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1274ms, sequenceid=2225, compaction requested=false 2024-12-03T15:23:18,170 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-03T15:23:18,170 DEBUG [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-03T15:23:18,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32963 is added to blk_1073741830_1006 (size=943014) 2024-12-03T15:23:18,172 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-03T15:23:18,172 INFO [M:0;2b5ef621a0dd:36539 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-03T15:23:18,172 INFO [M:0;2b5ef621a0dd:36539 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36539 2024-12-03T15:23:18,174 DEBUG [M:0;2b5ef621a0dd:36539 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/2b5ef621a0dd,36539,1733239225577 already deleted, retry=false 2024-12-03T15:23:18,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T15:23:18,276 INFO [M:0;2b5ef621a0dd:36539 {}] regionserver.HRegionServer(1307): Exiting; stopping=2b5ef621a0dd,36539,1733239225577; zookeeper connection closed. 2024-12-03T15:23:18,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36539-0x1009f6fd8b40000, quorum=127.0.0.1:60989, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-03T15:23:18,280 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-03T15:23:18,282 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T15:23:18,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T15:23:18,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T15:23:18,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.log.dir/,STOPPED} 2024-12-03T15:23:18,286 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-03T15:23:18,286 WARN [BP-1589599033-172.17.0.2-1733239222697 heartbeating to localhost/127.0.0.1:44673 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-03T15:23:18,286 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-03T15:23:18,286 WARN [BP-1589599033-172.17.0.2-1733239222697 heartbeating to localhost/127.0.0.1:44673 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1589599033-172.17.0.2-1733239222697 (Datanode Uuid a3ae884a-3479-4e2a-ad3f-1684a6ad8aaa) service to localhost/127.0.0.1:44673 2024-12-03T15:23:18,288 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data1/current/BP-1589599033-172.17.0.2-1733239222697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T15:23:18,288 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/cluster_ee7c9693-9d66-43f8-9a07-adf8cdb8bef5/dfs/data/data2/current/BP-1589599033-172.17.0.2-1733239222697 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-03T15:23:18,289 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-03T15:23:18,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-03T15:23:18,296 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-03T15:23:18,296 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-03T15:23:18,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-03T15:23:18,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/c14d321a-7e4f-2af7-4b55-eba00bd70cc5/hadoop.log.dir/,STOPPED} 2024-12-03T15:23:18,313 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-03T15:23:18,447 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
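The final lines close out the run: one master and one region server are stopped, the datanode and hdfs Jetty contexts are torn down, the MiniZK cluster is shut down, and HBaseTestingUtility reports the minicluster as down. A sketch of the usual lifecycle that brackets a log like this one, with illustrative JUnit wiring rather than the test's actual code:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpCluster() throws Exception {
    TEST_UTIL.startMiniCluster(); // defaults: one master, one region server, mini ZK and mini DFS
  }

  @AfterClass
  public static void tearDownCluster() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // emits "Shutting down minicluster" ... "Minicluster is down"
  }
}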